from __future__ import annotations


def bucket_sort(my_list: list) -> list:
    """Sort a list with bucket sort: bin each value by its offset from the
    minimum, then sort every bucket and concatenate.

    >>> bucket_sort([4, 5, 3, 2, 1])
    [1, 2, 3, 4, 5]
    """
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    for i in my_list:
        buckets[int(i - min_value)].append(i)

    return [v for bucket in buckets for v in sorted(bucket)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
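# One more illustrative check of the function above (not part of the original
# asserts): values are binned by their offset from the minimum, so duplicates
# share a bucket and come back out in sorted order.
assert bucket_sort([3, 1, 3, -2]) == [-2, 1, 3, 3]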
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging


if TYPE_CHECKING:
    from ...onnx.config import PatchingSpec
    from ...tokenization_utils_base import PreTrainedTokenizerBase


logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
    "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
    "allenai/longformer-large-4096-finetuned-triviaqa": (
        "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
    ),
    "allenai/longformer-base-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
    "allenai/longformer-large-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
}


class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30_522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3_072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export


class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task=task, patching_specs=patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
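# The dummy-input trick in generate_dummy_inputs above (zero mask, then mark
# every second token global) is easy to check in isolation. A minimal sketch,
# assuming only torch is installed; not part of the original module.
import torch

global_attention_mask = torch.zeros(1, 8, dtype=torch.int64)
global_attention_mask[:, ::2] = 1
print(global_attention_mask)  # tensor([[1, 0, 1, 0, 1, 0, 1, 0]])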
import os
import zipfile

import pytest

from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)

from .utils import require_lz4, require_py7zr, require_zstandard


@pytest.mark.parametrize(
    "compression_format, is_archive",
    [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
def test_base_extractors(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
        extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize(
    "compression_format, is_archive",
    [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
        extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content


@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path


@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path


@pytest.mark.parametrize(
    "insecure_tar_file, error_log",
    [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")],
)
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    insecure_tar_file = insecure_tar_files[insecure_tar_file]

    output_path = tmp_path / "extracted"
    TarExtractor.extract(insecure_tar_file, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg


def test_is_zipfile_false_positive(tmpdir):
    # We should have less false positives than zipfile.is_zipfile
    # We do that by checking only the magic number
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
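# The last test above relies on ZipExtractor checking only the leading magic
# number instead of scanning the whole file the way zipfile.is_zipfile does.
# Below is a minimal sketch of that kind of check; the names are illustrative,
# not the actual `datasets` implementation (which may also handle empty or
# spanned archives).
ZIP_LOCAL_FILE_HEADER = b"PK\x03\x04"  # magic number at offset 0 of a regular zip


def looks_like_zip(path) -> bool:
    # read only the first four bytes instead of searching for an
    # end-of-central-directory record anywhere in the file
    with open(path, "rb") as f:
        return f.read(len(ZIP_LOCAL_FILE_HEADER)) == ZIP_LOCAL_FILE_HEADER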
import warnings

from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor


logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
        OpenAIGPTConfig,
        OpenAIGPTDoubleHeadsModel,
        OpenAIGPTForSequenceClassification,
        OpenAIGPTLMHeadModel,
        OpenAIGPTModel,
    )


class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict


@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477,
            244, 249, 719, 881, 487, 544, 240, 244, 603, 481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
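# The integration test above uses greedy decoding (do_sample=False). Running
# the same check by hand looks roughly like the sketch below; it assumes the
# "openai-gpt" checkpoint and its tokenizer can be downloaded, and is not part
# of the original test file.
from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer

tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")

input_ids = tokenizer("the president is", return_tensors="pt").input_ids
output_ids = model.generate(input_ids, do_sample=False)  # greedy decoding
print(tokenizer.decode(output_ids[0]))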
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}


class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
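# A quick sketch of the derived hidden_size above, assuming the class is
# importable as Swinv2Config: with the default embed_dim=96 and four stages,
# the channel dimension doubles at each of the three downsampling steps.
config = Swinv2Config()  # defaults: embed_dim=96, depths=[2, 2, 6, 2]
assert config.hidden_size == 96 * 2 ** (4 - 1)  # 768 channels after the last stage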
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_vit import ViTFeatureExtractor
        from .image_processing_vit import ViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit import (
            VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
            ViTModel,
            ViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
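# From user code the lazy structure above is invisible: attribute access is
# what triggers the real import. A sketch, assuming the usual transformers
# package layout; not part of the original module.
from transformers.models import vit

# nothing heavy has been imported yet; this attribute access is what actually
# pulls in modeling_vit (and therefore torch)
model_cls = vit.ViTModel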
import itertools
import random
import unittest

import numpy as np

from transformers import BatchFeature, SpeechT5FeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_torch_available():
    import torch


global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a list of lists"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


@require_torch
class SpeechT5FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2_000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16_000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7_600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "do_normalize": self.do_normalize,
            "num_mel_bins": self.num_mel_bins,
            "hop_length": self.hop_length,
            "win_length": self.win_length,
            "win_function": self.win_function,
            "fmin": self.fmin,
            "fmax": self.fmax,
            "mel_floor": self.mel_floor,
            "return_attention_mask": self.return_attention_mask,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs

    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


@require_torch
class SpeechT5FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechT5FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechT5FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1_600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1_000])
            self.assertTrue(input_values[0][1_000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1_200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1_400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1_600, None]

        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1_000])
            self._check_zero_mean_unit_variance(input_values[2][:1_200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1_000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1_000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1_000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1_000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2_000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1_000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1_200))

    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def test_call_target(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_values = feature_extractor(audio_target=np_speech_inputs, padding=True, return_tensors="np").input_values
        self.assertTrue(input_values.ndim == 3)
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_batch_feature_target(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_batch_feature_target_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_padding_accepts_tensors_target_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)

    def test_attention_mask_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 93_680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))

    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
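# The _check_zero_mean_unit_variance helper above encodes what the extractor's
# per-utterance normalization should produce. A standalone sketch of that
# normalization follows; the small epsilon is an assumption for numerical
# safety, not necessarily the extractor's exact constant.
import numpy as np

x = np.random.rand(800).astype(np.float32)
normed = (x - x.mean()) / np.sqrt(x.var() + 1e-7)  # zero mean, unit variance
assert abs(normed.mean()) < 1e-3
assert abs(normed.var() - 1) < 1e-3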
import math

import qiskit


def quantum_full_adder(
    input_1: int = 1, input_2: int = 1, carry_in: int = 1
) -> qiskit.result.counts.Counts:
    """Build and simulate a quantum full adder for two bits plus a carry-in.
    An input of 2 puts the corresponding qubit into superposition."""
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")

    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)

    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)

    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1_000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
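# Sanity check, not part of the original file: with no input set to 2 the
# circuit contains no Hadamard gates, so every shot gives the same reading.
# If I have qiskit's classical-register bit ordering right, the result string
# is carry-out followed by sum: 1 + 1 + 1 = 0b11, so all shots read "11".
counts = quantum_full_adder(1, 1, 1)
assert counts == {"11": 1_000}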
from __future__ import annotations

from typing import TypedDict


class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")

    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or passive"
            " of cast to int."
        )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            "The parameter idx_original_string must be lower than" " len(bwt_string)."
        )

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]


if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
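# A quick round-trip of the two functions above, using the classic example
# string (runnable as-is; not part of the original module).
result = bwt_transform("^BANANA")
assert result["bwt_string"] == "BNN^AAA"
assert result["idx_original_string"] == 6
assert reverse_bwt(result["bwt_string"], result["idx_original_string"]) == "^BANANA"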
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFXLMRobertaModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")

        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
class Graph:
    def __init__(self):
        # map each vertex to its list of adjacent vertices
        self.vertex = {}

    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        # check if vertex is already present
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)

        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark start vertex as visited
        visited[start_vertex] = True

        print(start_vertex, end=" ")

        # recur for all the vertices that are adjacent to this node
        for i in self.vertex.get(start_vertex, []):
            if not visited[i]:
                self.dfs_recursive(i, visited)


if __name__ == "__main__":
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)

    g.print_graph()
    print("DFS:")
    g.dfs()

    # OUTPUT:
    # 0 -> 1 -> 2
    # 1 -> 2
    # 2 -> 0 -> 3
    # 3 -> 3
    # DFS:
    # 0 1 2 3
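# For comparison, the same traversal can be written iteratively with an
# explicit stack; a sketch against the Graph class above (visit order can
# differ from the recursive variant when a vertex has several unvisited
# neighbors). Not part of the original file.
def dfs_iterative(graph: Graph, start_vertex: int) -> None:
    # explicit-stack equivalent of dfs_recursive
    visited = [False] * len(graph.vertex)
    stack = [start_vertex]
    while stack:
        vertex = stack.pop()
        if visited[vertex]:
            continue
        visited[vertex] = True
        print(vertex, end=" ")
        stack.extend(graph.vertex.get(vertex, []))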
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __a ( _SCREAMING_SNAKE_CASE ):
def snake_case_ ( self ):
_lowerCamelCase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_lowerCAmelCase , 'embed_dim' ) )
self.parent.assertTrue(hasattr(_lowerCAmelCase , 'num_heads' ) )
class __a :
def __init__( self , a__ , a__=13 , a__=64 , a__=3 , a__=[16, 48, 96] , a__=[1, 3, 6] , a__=[1, 2, 10] , a__=[7, 3, 3] , a__=[4, 2, 2] , a__=[2, 1, 1] , a__=[2, 2, 2] , a__=[False, False, True] , a__=[0.0, 0.0, 0.0] , a__=0.02 , a__=1e-12 , a__=True , a__=True , a__=2 , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = image_size
_lowerCamelCase = patch_sizes
_lowerCamelCase = patch_stride
_lowerCamelCase = patch_padding
_lowerCamelCase = is_training
_lowerCamelCase = use_labels
_lowerCamelCase = num_labels
_lowerCamelCase = num_channels
_lowerCamelCase = embed_dim
_lowerCamelCase = num_heads
_lowerCamelCase = stride_kv
_lowerCamelCase = depth
_lowerCamelCase = cls_token
_lowerCamelCase = attention_drop_rate
_lowerCamelCase = initializer_range
_lowerCamelCase = layer_norm_eps
def snake_case_ ( self ):
_lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase = None
if self.use_labels:
# create a random int32 tensor of given shape
_lowerCamelCase = ids_tensor([self.batch_size] , self.num_labels )
_lowerCamelCase = self.get_config()
return config, pixel_values, labels
def snake_case_ ( self ):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def snake_case_ ( self , a__ , a__ , a__ ):
_lowerCamelCase = TFCvtModel(config=_lowerCAmelCase )
_lowerCamelCase = model(_lowerCAmelCase , training=_lowerCAmelCase )
_lowerCamelCase = (self.image_size, self.image_size)
_lowerCamelCase , _lowerCamelCase = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
_lowerCamelCase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
_lowerCamelCase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def snake_case_ ( self , a__ , a__ , a__ ):
_lowerCamelCase = self.num_labels
_lowerCamelCase = TFCvtForImageClassification(_lowerCAmelCase )
_lowerCamelCase = model(_lowerCAmelCase , labels=_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case_ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = config_and_inputs
_lowerCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ : int = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ : int = (
{"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE__ : List[str] = False
SCREAMING_SNAKE_CASE__ : Union[str, Any] = False
SCREAMING_SNAKE_CASE__ : Optional[int] = False
SCREAMING_SNAKE_CASE__ : Dict = False
SCREAMING_SNAKE_CASE__ : Dict = False
def snake_case_ ( self ):
_lowerCamelCase = TFCvtModelTester(self )
_lowerCamelCase = TFCvtConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 )
def snake_case_ ( self ):
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason='Cvt does not output attentions' )
def snake_case_ ( self ):
pass
@unittest.skip(reason='Cvt does not use inputs_embeds' )
def snake_case_ ( self ):
pass
@unittest.skip(reason='Cvt does not support input and output embeddings' )
def snake_case_ ( self ):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
def snake_case_ ( self ):
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
@slow
def snake_case_ ( self ):
super().test_keras_fit()
@unittest.skip(reason='Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8' )
def snake_case_ ( self ):
_lowerCamelCase = tf.keras.mixed_precision.Policy('mixed_float16' )
tf.keras.mixed_precision.set_global_policy(_lowerCAmelCase )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy('float32' )
def snake_case_ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(_lowerCAmelCase )
_lowerCamelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase = [*signature.parameters.keys()]
_lowerCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def snake_case_ ( self ):
def check_hidden_states_output(a__ , a__ , a__ ):
_lowerCamelCase = model_class(_lowerCAmelCase )
_lowerCamelCase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
_lowerCamelCase = outputs.hidden_states
_lowerCamelCase = len(self.model_tester.depth )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def snake_case_ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def snake_case_ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def snake_case_ ( self ):
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase = TFCvtModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def SCREAMING_SNAKE_CASE_ ( )-> str:
_lowerCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class __a ( unittest.TestCase ):
@cached_property
def snake_case_ ( self ):
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def snake_case_ ( self ):
_lowerCamelCase = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_lowerCamelCase = self.default_image_processor
_lowerCamelCase = prepare_img()
_lowerCamelCase = image_processor(images=_lowerCAmelCase , return_tensors='tf' )
# forward pass
_lowerCamelCase = model(**_lowerCAmelCase )
# verify the logits
_lowerCamelCase = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
_lowerCamelCase = tf.constant([0.9285, 0.9015, -0.3150] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _lowerCAmelCase , atol=1e-4 ) )
| 650 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json',
    'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json',
    'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json',
    'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json',
    'funnel-transformer/intermediate': (
        'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'
    ),
    'funnel-transformer/intermediate-base': (
        'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'
    ),
    'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json',
    'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json',
    'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json',
    'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json',
}


class FunnelConfig(PretrainedConfig):
    """Configuration class for Funnel Transformer models."""

    model_type = 'funnel'
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'n_head',
    }

    def __init__(
        self,
        vocab_size=30_522,
        block_sizes=[4, 4, 4],
        block_repeats=None,
        num_decoder_layers=2,
        d_model=768,
        n_head=12,
        d_head=64,
        d_inner=3_072,
        hidden_act='gelu_new',
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        initializer_range=0.1,
        initializer_std=None,
        layer_norm_eps=1e-9,
        pooling_type='mean',
        attention_type='relative_shift',
        separate_cls=True,
        truncate_seq=True,
        pool_q_only=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), '`block_sizes` and `block_repeats` should have the same length.'
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            'mean',
            'max',
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            'relative_shift',
            'factorized',
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only

        super().__init__(**kwargs)

    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            'This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.'
        )

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError('This model does not support the setting of `num_blocks`. Please set `block_sizes`.')
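

if __name__ == "__main__":
    # A small sketch of the derived properties defined above; the printed values
    # follow directly from block_sizes.
    config = FunnelConfig(block_sizes=[2, 2], block_repeats=[1, 1])
    print(config.num_blocks)  # 2
    print(config.num_hidden_layers)  # 4, i.e. sum(block_sizes)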
"""simple docstring"""
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class lowerCamelCase_( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = False , lowerCamelCase__ = False , lowerCamelCase__ = None , lowerCamelCase__ = None , **lowerCamelCase__ , ):
super().__init__(
features=_lowerCAmelCase , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase , streaming=_lowerCAmelCase , num_proc=_lowerCAmelCase , **_lowerCAmelCase , )
_lowerCamelCase = Generator(
cache_dir=_lowerCAmelCase , features=_lowerCAmelCase , generator=_lowerCAmelCase , gen_kwargs=_lowerCAmelCase , **_lowerCAmelCase , )
def snake_case__ ( self ):
# Build iterable dataset
if self.streaming:
_lowerCamelCase = self.builder.as_streaming_dataset(split='''train''' )
# Build regular (map-style) dataset
else:
_lowerCamelCase = None
_lowerCamelCase = None
_lowerCamelCase = None
_lowerCamelCase = None
self.builder.download_and_prepare(
download_config=_lowerCAmelCase , download_mode=_lowerCAmelCase , verification_mode=_lowerCAmelCase , base_path=_lowerCAmelCase , num_proc=self.num_proc , )
_lowerCamelCase = self.builder.as_dataset(
split='''train''' , verification_mode=_lowerCAmelCase , in_memory=self.keep_in_memory )
return dataset
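

if __name__ == "__main__":
    # A hedged usage sketch (not part of the original module): this input stream
    # is what backs the public `Dataset.from_generator` API in user code.
    from datasets import Dataset

    def gen():
        for i in range(3):
            yield {'id': i}

    print(len(Dataset.from_generator(gen)))  # 3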
| 661 |
from __future__ import annotations

from collections.abc import Iterator


class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        """Return the sum of the values of ``node`` and all of its descendants."""
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
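
# A small usage sketch of the classes above: the node sum is exposed through iteration.
if __name__ == "__main__":
    root = Node(10)
    root.left = Node(5)
    root.right = Node(-3)
    print(next(iter(BinaryTreeNodeSum(root))))  # 12, the sum of all node values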
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class a__ :
def __init__( self : List[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Any , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : int=None , UpperCamelCase_ : Optional[Any]=None):
"""simple docstring"""
__UpperCAmelCase : int = start
__UpperCAmelCase : Tuple = end
__UpperCAmelCase : Optional[int] = val
__UpperCAmelCase : Union[str, Any] = (start + end) // 2
__UpperCAmelCase : Dict = left
__UpperCAmelCase : str = right
def __repr__( self : List[str]):
"""simple docstring"""
return F"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"
class a__ :
def __init__( self : Union[str, Any] , UpperCamelCase_ : Sequence , UpperCamelCase_ : int):
"""simple docstring"""
__UpperCAmelCase : Any = collection
__UpperCAmelCase : Any = function
if self.collection:
__UpperCAmelCase : Optional[int] = self._build_tree(0 , len(_lowerCAmelCase) - 1)
def a_ ( self : Union[str, Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[Any]):
"""simple docstring"""
self._update_tree(self.root , _lowerCAmelCase , _lowerCAmelCase)
def a_ ( self : Optional[int] , UpperCamelCase_ : int , UpperCamelCase_ : List[Any]):
"""simple docstring"""
return self._query_range(self.root , _lowerCAmelCase , _lowerCAmelCase)
def a_ ( self : List[str] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple):
"""simple docstring"""
if start == end:
return SegmentTreeNode(_lowerCAmelCase , _lowerCAmelCase , self.collection[start])
__UpperCAmelCase : int = (start + end) // 2
__UpperCAmelCase : Dict = self._build_tree(_lowerCAmelCase , _lowerCAmelCase)
__UpperCAmelCase : List[Any] = self._build_tree(mid + 1 , _lowerCAmelCase)
return SegmentTreeNode(_lowerCAmelCase , _lowerCAmelCase , self.fn(left.val , right.val) , _lowerCAmelCase , _lowerCAmelCase)
def a_ ( self : List[str] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : List[str]):
"""simple docstring"""
if node.start == i and node.end == i:
__UpperCAmelCase : List[str] = val
return
if i <= node.mid:
self._update_tree(node.left , _lowerCAmelCase , _lowerCAmelCase)
else:
self._update_tree(node.right , _lowerCAmelCase , _lowerCAmelCase)
__UpperCAmelCase : Optional[int] = self.fn(node.left.val , node.right.val)
def a_ ( self : int , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : int):
"""simple docstring"""
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , _lowerCAmelCase , _lowerCAmelCase)
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , _lowerCAmelCase , node.mid) , self._query_range(node.right , node.mid + 1 , _lowerCAmelCase) , )
else:
# range in right child tree
return self._query_range(node.right , _lowerCAmelCase , _lowerCAmelCase)
def a_ ( self : Optional[int]):
"""simple docstring"""
if self.root is not None:
__UpperCAmelCase : List[str] = Queue()
queue.put(self.root)
while not queue.empty():
__UpperCAmelCase : Optional[Any] = queue.get()
yield node
if node.left is not None:
queue.put(node.left)
if node.right is not None:
queue.put(node.right)
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print("""*""" * 50)
A = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
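
    # A quick sanity check of the values in the comments above: with operator.add,
    # updating index 1 to 5 makes the backing array [2, 5, 5, 3, 4].
    checker = SegmentTree([2, 1, 5, 3, 4], operator.add)
    checker.update(1, 5)
    assert checker.query_range(1, 3) == 5 + 5 + 3  # 13
    assert checker.query_range(3, 4) == 3 + 4  # 7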
| 77 |
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Recursively narrow the range from both ends; return the index of ``key`` or -1."""
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
import doctest
doctest.testmod() | 31 | 0 |
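
# A quick usage check of `search` as defined above.
if __name__ == "__main__":
    print(search([1, 2, 3, 4, 5], 4))  # 3
    print(search([1, 2, 3, 4, 5], 10))  # -1, key not present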
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class a ( _SCREAMING_SNAKE_CASE ):
@slow
@require_torch
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: List[str] = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' )
__SCREAMING_SNAKE_CASE: Dict = BertTokenizer.from_pretrained('''bert-base-uncased''' )
__SCREAMING_SNAKE_CASE: Union[str, Any] = bertabert.config.encoder.vocab_size
__SCREAMING_SNAKE_CASE: Union[str, Any] = tokenizer.sep_token_id
__SCREAMING_SNAKE_CASE: Optional[int] = tokenizer.cls_token_id
__SCREAMING_SNAKE_CASE: Optional[Any] = 128
__SCREAMING_SNAKE_CASE: str = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' )
__SCREAMING_SNAKE_CASE: Union[str, Any] = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' )
__SCREAMING_SNAKE_CASE: Optional[Any] = train_dataset.select(range(32 ) )
__SCREAMING_SNAKE_CASE: List[Any] = val_dataset.select(range(16 ) )
__SCREAMING_SNAKE_CASE: List[Any] = 4
def _map_to_encoder_decoder_inputs(_lowerCAmelCase ):
# Tokenizer will automatically set [BOS] <text> [EOS]
__SCREAMING_SNAKE_CASE: List[Any] = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=_lowerCAmelCase , max_length=512 )
__SCREAMING_SNAKE_CASE: List[str] = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=_lowerCAmelCase , max_length=128 )
__SCREAMING_SNAKE_CASE: Dict = inputs.input_ids
__SCREAMING_SNAKE_CASE: Dict = inputs.attention_mask
__SCREAMING_SNAKE_CASE: List[str] = outputs.input_ids
__SCREAMING_SNAKE_CASE: str = outputs.input_ids.copy()
__SCREAMING_SNAKE_CASE: Any = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels''']
]
__SCREAMING_SNAKE_CASE: List[str] = outputs.attention_mask
assert all(len(_lowerCAmelCase ) == 512 for x in inputs.input_ids )
assert all(len(_lowerCAmelCase ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(_lowerCAmelCase ):
__SCREAMING_SNAKE_CASE: Dict = pred.label_ids
__SCREAMING_SNAKE_CASE: Union[str, Any] = pred.predictions
# all unnecessary tokens are removed
__SCREAMING_SNAKE_CASE: Any = tokenizer.batch_decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: List[str] = tokenizer.batch_decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Union[str, Any] = sum([int(pred_str[i] == label_str[i] ) for i in range(len(_lowerCAmelCase ) )] ) / len(_lowerCAmelCase )
return {"accuracy": accuracy}
# map train dataset
__SCREAMING_SNAKE_CASE: Dict = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=_lowerCAmelCase , batch_size=_lowerCAmelCase , remove_columns=['''article''', '''highlights'''] , )
train_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
# same for validation dataset
__SCREAMING_SNAKE_CASE: Optional[Any] = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=_lowerCAmelCase , batch_size=_lowerCAmelCase , remove_columns=['''article''', '''highlights'''] , )
val_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
__SCREAMING_SNAKE_CASE: Dict = self.get_auto_remove_tmp_dir()
__SCREAMING_SNAKE_CASE: int = SeqaSeqTrainingArguments(
output_dir=_lowerCAmelCase , per_device_train_batch_size=_lowerCAmelCase , per_device_eval_batch_size=_lowerCAmelCase , predict_with_generate=_lowerCAmelCase , evaluation_strategy='''steps''' , do_train=_lowerCAmelCase , do_eval=_lowerCAmelCase , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
__SCREAMING_SNAKE_CASE: int = SeqaSeqTrainer(
model=_lowerCAmelCase , args=_lowerCAmelCase , compute_metrics=_compute_metrics , train_dataset=_lowerCAmelCase , eval_dataset=_lowerCAmelCase , tokenizer=_lowerCAmelCase , )
# start training
trainer.train()
| 202 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model',
        'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model',
    },
    'tokenizer_file': {
        'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json',
        'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/fnet-base': 512,
    'google/fnet-large': 512,
}

SPIECE_UNDERLINE = '▁'


class FNetTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" FNet tokenizer, backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'token_type_ids']
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token='<unk>',
        sep_token='[SEP]',
        pad_token='<pad>',
        cls_token='[CLS]',
        mask_token='[MASK]',
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
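

if __name__ == "__main__":
    # A hedged usage sketch; it assumes network access to fetch the
    # 'google/fnet-base' checkpoint listed in the map above.
    tok = FNetTokenizerFast.from_pretrained('google/fnet-base')
    enc = tok('Hello world', 'Second segment')
    print(enc['token_type_ids'])  # 0s for [CLS] + first segment + [SEP], then 1s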
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
__A = logging.get_logger(__name__)
__A = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
__A = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
__A = {
'allenai/led-base-16384': 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def _A ( ):
lowercase__ = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
lowercase__ = bs[:]
lowercase__ = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__UpperCAmelCase )
cs.append(2**8 + n )
n += 1
lowercase__ = [chr(__UpperCAmelCase ) for n in cs]
return dict(zip(__UpperCAmelCase , __UpperCAmelCase ) )
def _A ( lowercase__ ):
lowercase__ = set()
lowercase__ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase__ = char
return pairs
class A ( _SCREAMING_SNAKE_CASE ):
lowerCamelCase : str = VOCAB_FILES_NAMES
lowerCamelCase : int = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : List[str] = ["""input_ids""", """attention_mask"""]
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="replace" , lowerCamelCase__="<s>" , lowerCamelCase__="</s>" , lowerCamelCase__="</s>" , lowerCamelCase__="<s>" , lowerCamelCase__="<unk>" , lowerCamelCase__="<pad>" , lowerCamelCase__="<mask>" , lowerCamelCase__=False , **lowerCamelCase__ , ) -> List[Any]:
'''simple docstring'''
lowercase__ = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else bos_token
lowercase__ = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else eos_token
lowercase__ = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else sep_token
lowercase__ = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else cls_token
lowercase__ = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else unk_token
lowercase__ = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowercase__ = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else mask_token
super().__init__(
errors=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase , **_lowerCAmelCase , )
with open(_lowerCAmelCase , encoding="""utf-8""" ) as vocab_handle:
lowercase__ = json.load(_lowerCAmelCase )
lowercase__ = {v: k for k, v in self.encoder.items()}
lowercase__ = errors # how to handle errors in decoding
lowercase__ = bytes_to_unicode()
lowercase__ = {v: k for k, v in self.byte_encoder.items()}
with open(_lowerCAmelCase , encoding="""utf-8""" ) as merges_handle:
lowercase__ = merges_handle.read().split("""\n""" )[1:-1]
lowercase__ = [tuple(merge.split() ) for merge in bpe_merges]
lowercase__ = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
lowercase__ = {}
lowercase__ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowercase__ = re.compile(R"""\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def A__ ( self ) -> List[Any]:
'''simple docstring'''
return len(self.encoder )
def A__ ( self ) -> Tuple:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def A__ ( self , lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowercase__ = tuple(_lowerCAmelCase )
lowercase__ = get_pairs(_lowerCAmelCase )
if not pairs:
return token
while True:
lowercase__ = min(_lowerCAmelCase , key=lambda lowerCamelCase__ : self.bpe_ranks.get(_lowerCAmelCase , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
lowercase__ , lowercase__ = bigram
lowercase__ = []
lowercase__ = 0
while i < len(_lowerCAmelCase ):
try:
lowercase__ = word.index(_lowerCAmelCase , _lowerCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowercase__ = j
if word[i] == first and i < len(_lowerCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase__ = tuple(_lowerCAmelCase )
lowercase__ = new_word
if len(_lowerCAmelCase ) == 1:
break
else:
lowercase__ = get_pairs(_lowerCAmelCase )
lowercase__ = """ """.join(_lowerCAmelCase )
lowercase__ = word
return word
def A__ ( self , lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
lowercase__ = []
for token in re.findall(self.pat , _lowerCAmelCase ):
lowercase__ = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_lowerCAmelCase ).split(""" """ ) )
return bpe_tokens
def A__ ( self , lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
return self.encoder.get(_lowerCAmelCase , self.encoder.get(self.unk_token ) )
def A__ ( self , lowerCamelCase__ ) -> Dict:
'''simple docstring'''
return self.decoder.get(_lowerCAmelCase )
def A__ ( self , lowerCamelCase__ ) -> Dict:
'''simple docstring'''
lowercase__ = """""".join(_lowerCAmelCase )
lowercase__ = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def A__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> int:
'''simple docstring'''
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase__ = os.path.join(
_lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
lowercase__ = os.path.join(
_lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowerCAmelCase , ensure_ascii=_lowerCAmelCase ) + """\n""" )
lowercase__ = 0
with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase__ : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""" )
lowercase__ = token_index
writer.write(""" """.join(_lowerCAmelCase ) + """\n""" )
index += 1
return vocab_file, merge_file
def A__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> List[str]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase__ = [self.cls_token_id]
lowercase__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A__ ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = False ) -> Any:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_lowerCAmelCase )) + [1]
return [1] + ([0] * len(_lowerCAmelCase )) + [1, 1] + ([0] * len(_lowerCAmelCase )) + [1]
def A__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> int:
'''simple docstring'''
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A__ ( self , lowerCamelCase__ , lowerCamelCase__=False , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
lowercase__ = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_lowerCAmelCase ) > 0 and not text[0].isspace()):
lowercase__ = """ """ + text
return (text, kwargs)
def A__ ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = PaddingStrategy.DO_NOT_PAD , lowerCamelCase__ = None , lowerCamelCase__ = None , ) -> Tuple:
'''simple docstring'''
lowercase__ = super()._pad(
encoded_inputs=_lowerCAmelCase , max_length=_lowerCAmelCase , padding_strategy=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , )
# Load from model defaults
if return_attention_mask is None:
lowercase__ = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowercase__ = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
lowercase__ = len(encoded_inputs["""global_attention_mask"""] ) != len(_lowerCAmelCase )
if needs_to_be_padded:
lowercase__ = len(_lowerCAmelCase ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowercase__ = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
lowercase__ = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs
| 325 |
from __future__ import annotations

from collections.abc import Generator


def sieve() -> Generator[int, None, None]:
    """Yield the prime numbers in increasing order via an incremental sieve."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    """Return the smallest odd index ``n`` whose prime ``p`` satisfies 2 * p * n > limit."""
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution()) | 31 | 0 |
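    # A quick check of the sieve itself: the first ten primes.
    from itertools import islice

    print(list(islice(sieve(), 10)))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]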
def solution(n: int = 4000000) -> int:
    """Return the sum of the even-valued Fibonacci terms that do not exceed ``n``."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
print(F"""{solution() = }""") | 30 |
import numpy as np
import torch
from torch.utils.data import DataLoader

from accelerate.utils.dataclasses import DistributedType


class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b


def mocked_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")

    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
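

if __name__ == "__main__":
    # A small sketch tying the regression helpers together (pure PyTorch, no
    # Accelerate object needed for this part).
    dataset = RegressionDataset(length=8, seed=42)
    loader = DataLoader(dataset, batch_size=4)
    model = RegressionModel(a=1, b=0)
    batch = next(iter(loader))
    print(model(batch["x"]).shape)  # torch.Size([4])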
'''simple docstring'''
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
A_ = NewType("DataClass", Any)
A_ = NewType("DataClassType", Any)
def UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ) -> Tuple:
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)." )
def UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ) -> Callable[[str], Any]:
snake_case__ : List[Any] = {str(__UpperCAmelCase ): choice for choice in choices}
return lambda __SCREAMING_SNAKE_CASE : str_to_choice.get(__UpperCAmelCase , __UpperCAmelCase )
def UpperCamelCase__ ( *,
__SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = dataclasses.MISSING , __SCREAMING_SNAKE_CASE = dataclasses.MISSING , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ) -> dataclasses.Field:
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
snake_case__ : Any = {}
if aliases is not None:
snake_case__ : int = aliases
if help is not None:
snake_case__ : Dict = help
return dataclasses.field(metadata=__UpperCAmelCase , default=__UpperCAmelCase , default_factory=__UpperCAmelCase , **__UpperCAmelCase )
class lowercase_ ( _SCREAMING_SNAKE_CASE ):
A_ = 4_2
def __init__( self : Any , __lowerCamelCase : Union[DataClassType, Iterable[DataClassType]] , **__lowerCamelCase : Dict ):
# To make the default appear when using --help
if "formatter_class" not in kwargs:
snake_case__ : Optional[Any] = ArgumentDefaultsHelpFormatter
super().__init__(**_lowerCAmelCase )
if dataclasses.is_dataclass(_lowerCAmelCase ):
snake_case__ : Tuple = [dataclass_types]
snake_case__ : Optional[Any] = list(_lowerCAmelCase )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(_lowerCAmelCase )
@staticmethod
def _lowerCAmelCase ( __lowerCamelCase : ArgumentParser , __lowerCamelCase : dataclasses.Field ):
snake_case__ : Tuple = F"--{field.name}"
snake_case__ : List[str] = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , _lowerCAmelCase ):
raise RuntimeError(
'Unresolved type detected, which should have been done with the help of '
'`typing.get_type_hints` method by default' )
snake_case__ : Dict = kwargs.pop('aliases' , [] )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
snake_case__ : str = [aliases]
snake_case__ : List[str] = getattr(field.type , '__origin__' , field.type )
if origin_type is Union or (hasattr(_lowerCAmelCase , 'UnionType' ) and isinstance(_lowerCAmelCase , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(_lowerCAmelCase ) not in field.type.__args__
):
raise ValueError(
'Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'
' the argument parser only supports one type per argument.'
F" Problem encountered in field '{field.name}'." )
if type(_lowerCAmelCase ) not in field.type.__args__:
# filter `str` in Union
snake_case__ : Optional[int] = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
snake_case__ : Optional[int] = getattr(field.type , '__origin__' , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
snake_case__ : Union[str, Any] = (
field.type.__args__[0] if isinstance(_lowerCAmelCase , field.type.__args__[1] ) else field.type.__args__[1]
)
snake_case__ : Dict = getattr(field.type , '__origin__' , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
snake_case__ : Dict = {}
if origin_type is Literal or (isinstance(field.type , _lowerCAmelCase ) and issubclass(field.type , _lowerCAmelCase )):
if origin_type is Literal:
snake_case__ : Union[str, Any] = field.type.__args__
else:
snake_case__ : List[str] = [x.value for x in field.type]
snake_case__ : Any = make_choice_type_function(kwargs['choices'] )
if field.default is not dataclasses.MISSING:
snake_case__ : Optional[Any] = field.default
else:
snake_case__ : Optional[int] = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
snake_case__ : Tuple = copy(_lowerCAmelCase )
# Hack because type=bool in argparse does not behave as we want.
snake_case__ : int = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
snake_case__ : Tuple = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
snake_case__ : List[str] = default
# This tells argparse we accept 0 or 1 value after --field_name
snake_case__ : Tuple = '?'
# This is the value that will get picked if we do --field_name (without value)
snake_case__ : Tuple = True
elif isclass(_lowerCAmelCase ) and issubclass(_lowerCAmelCase , _lowerCAmelCase ):
snake_case__ : Any = field.type.__args__[0]
snake_case__ : List[Any] = '+'
if field.default_factory is not dataclasses.MISSING:
snake_case__ : str = field.default_factory()
elif field.default is dataclasses.MISSING:
snake_case__ : str = True
else:
snake_case__ : List[str] = field.type
if field.default is not dataclasses.MISSING:
snake_case__ : Union[str, Any] = field.default
elif field.default_factory is not dataclasses.MISSING:
snake_case__ : str = field.default_factory()
else:
snake_case__ : List[Any] = True
parser.add_argument(_lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
snake_case__ : Union[str, Any] = False
parser.add_argument(F"--no_{field.name}" , action='store_false' , dest=field.name , **_lowerCAmelCase )
def _lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : DataClassType ):
if hasattr(_lowerCAmelCase , '_argument_group_name' ):
snake_case__ : Dict = self.add_argument_group(dtype._argument_group_name )
else:
snake_case__ : Optional[Any] = self
try:
snake_case__ : int = get_type_hints(_lowerCAmelCase )
except NameError:
raise RuntimeError(
F"Type resolution failed for {dtype}. Try declaring the class in global scope or "
'removing line of `from __future__ import annotations` which opts in Postponed '
'Evaluation of Annotations (PEP 563)' )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(_lowerCAmelCase ):
snake_case__ : Dict = '.'.join(map(_lowerCAmelCase , sys.version_info[:3] ) )
raise RuntimeError(
F"Type resolution failed for {dtype} on Python {python_version}. Try removing "
'line of `from __future__ import annotations` which opts in union types as '
'`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '
'support Python versions that lower than 3.10, you need to use '
'`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '
'`X | None`.' ) from ex
raise
for field in dataclasses.fields(_lowerCAmelCase ):
if not field.init:
continue
snake_case__ : Optional[int] = type_hints[field.name]
self._parse_dataclass_field(_lowerCAmelCase , _lowerCAmelCase )
def _lowerCAmelCase ( self : Any , __lowerCamelCase : int=None , __lowerCamelCase : str=False , __lowerCamelCase : Any=True , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Any=None , ):
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
snake_case__ : int = []
if args_filename:
args_files.append(Path(_lowerCAmelCase ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix('.args' ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
snake_case__ : List[str] = ArgumentParser()
args_file_parser.add_argument(_lowerCAmelCase , type=_lowerCAmelCase , action='append' )
# Use only remaining args for further parsing (remove the args_file_flag)
snake_case__ , snake_case__ : Dict = args_file_parser.parse_known_args(args=_lowerCAmelCase )
snake_case__ : Optional[Any] = vars(_lowerCAmelCase ).get(args_file_flag.lstrip('-' ) , _lowerCAmelCase )
if cmd_args_file_paths:
args_files.extend([Path(_lowerCAmelCase ) for p in cmd_args_file_paths] )
snake_case__ : Tuple = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
snake_case__ : Union[str, Any] = file_args + args if args is not None else file_args + sys.argv[1:]
snake_case__ , snake_case__ : Optional[Any] = self.parse_known_args(args=_lowerCAmelCase )
snake_case__ : Optional[Any] = []
for dtype in self.dataclass_types:
snake_case__ : Optional[int] = {f.name for f in dataclasses.fields(_lowerCAmelCase ) if f.init}
snake_case__ : Optional[Any] = {k: v for k, v in vars(_lowerCAmelCase ).items() if k in keys}
for k in keys:
delattr(_lowerCAmelCase , _lowerCAmelCase )
snake_case__ : Any = dtype(**_lowerCAmelCase )
outputs.append(_lowerCAmelCase )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(_lowerCAmelCase )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(F"Some specified arguments are not used by the HfArgumentParser: {remaining_args}" )
return (*outputs,)
def _lowerCAmelCase ( self : Any , __lowerCamelCase : Dict[str, Any] , __lowerCamelCase : bool = False ):
snake_case__ : Tuple = set(args.keys() )
snake_case__ : Optional[int] = []
for dtype in self.dataclass_types:
snake_case__ : List[Any] = {f.name for f in dataclasses.fields(_lowerCAmelCase ) if f.init}
snake_case__ : Optional[int] = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
snake_case__ : List[Any] = dtype(**_lowerCAmelCase )
outputs.append(_lowerCAmelCase )
if not allow_extra_keys and unused_keys:
raise ValueError(F"Some keys are not used by the HfArgumentParser: {sorted(_lowerCAmelCase )}" )
return tuple(_lowerCAmelCase )
def _lowerCAmelCase ( self : Tuple , __lowerCamelCase : str , __lowerCamelCase : bool = False ):
with open(Path(_lowerCAmelCase ) , encoding='utf-8' ) as open_json_file:
snake_case__ : Optional[Any] = json.loads(open_json_file.read() )
snake_case__ : Dict = self.parse_dict(_lowerCAmelCase , allow_extra_keys=_lowerCAmelCase )
return tuple(_lowerCAmelCase )
def _lowerCAmelCase ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : bool = False ):
snake_case__ : Dict = self.parse_dict(yaml.safe_load(Path(_lowerCAmelCase ).read_text() ) , allow_extra_keys=_lowerCAmelCase )
return tuple(_lowerCAmelCase )
| 270 |
import warnings

from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            'The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use LayoutLMv2ImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import json
from typing import Iterator, List, Union

from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing


class SentencePieceUnigramTokenizer(BaseTokenizer):
    """A SentencePiece-style Unigram tokenizer built on the HuggingFace tokenizers library."""

    def __init__(
        self,
        replacement: str = '▁',
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = '<unk>',
        eos_token: Union[str, AddedToken] = '</s>',
        pad_token: Union[str, AddedToken] = '<pad>',
    ):
        self.special_tokens = {
            'pad': {'id': 0, 'token': pad_token},
            'eos': {'id': 1, 'token': eos_token},
            'unk': {'id': 2, 'token': unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict['id']] = token_dict['token']

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(' {2,}'), ' '),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])],
        )

        parameters = {
            'model': 'SentencePieceUnigram',
            'replacement': replacement,
            'add_prefix_space': add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Iterator[str],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given iterator."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        """Patch the serialized model so the unknown token id is set."""
        tokenizer_json = json.loads(self._tokenizer.to_str())

        tokenizer_json['model']['unk_id'] = self.special_tokens['unk']['id']

        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
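

if __name__ == "__main__":
    # A hedged usage sketch; the toy corpus and vocab size are illustrative only
    # and assume the `tokenizers` library is installed.
    tokenizer = SentencePieceUnigramTokenizer()
    tokenizer.train_from_iterator(
        ['hello tokenizer world', 'trains a tiny unigram model'], vocab_size=30, show_progress=False
    )
    print(tokenizer.encode('hello world').tokens)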
| 591 |
from __future__ import annotations


def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Find the index of the first negative number in a decreasingly sorted array."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count negatives in a row/column-sorted matrix (LeetCode 1351 style) via binary search."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count negatives by checking every value."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Count negatives row by row, breaking at the first negative in each row."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    """Benchmark the three counting functions against each other."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
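
    # A quick correctness check on the first test grid; all three methods agree.
    sample = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
    assert count_negatives_binary_search(sample) == 8
    assert count_negatives_brute_force(sample) == 8
    assert count_negatives_brute_force_with_break(sample) == 8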
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = '2.13.1'
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 417 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_mmbt': ['MMBTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_mmbt'] = ['MMBTForClassification', 'MMBTModel', 'ModalEmbeddings']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 31 | 0 |
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    """simple docstring"""
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # handling of values out of allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
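# Illustrative check (not in the original file): with the polarizer aligned (0 degrees)
# all intensity passes; with crossed polarizers (90 degrees) essentially none does.
assert malus_law(100.0, 0) == 100.0
assert math.isclose(malus_law(100.0, 90), 0.0, abs_tol=1e-9)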
if __name__ == "__main__":
import doctest
doctest.testmod(name='''malus_law''')
| 351 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class lowerCamelCase_ :
'''simple docstring'''
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
return self.get_dummy_input()
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'." )
def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : str=False , _lowerCAmelCase : Optional[int]=False , _lowerCAmelCase : Dict=False , ):
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 32
SCREAMING_SNAKE_CASE_ = (32, 32)
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = torch.device(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = (batch_size, num_channels) + sizes
SCREAMING_SNAKE_CASE_ = randn_tensor(_lowerCAmelCase , generator=_lowerCAmelCase , device=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = {'hidden_states': hidden_states}
if include_temb:
SCREAMING_SNAKE_CASE_ = 128
SCREAMING_SNAKE_CASE_ = randn_tensor((batch_size, temb_channels) , generator=_lowerCAmelCase , device=_lowerCAmelCase )
if include_res_hidden_states_tuple:
SCREAMING_SNAKE_CASE_ = torch.manual_seed(1 )
SCREAMING_SNAKE_CASE_ = (randn_tensor(_lowerCAmelCase , generator=_lowerCAmelCase , device=_lowerCAmelCase ),)
if include_encoder_hidden_states:
SCREAMING_SNAKE_CASE_ = floats_tensor((batch_size, 32, 32) ).to(_lowerCAmelCase )
if include_skip_sample:
SCREAMING_SNAKE_CASE_ = randn_tensor(((batch_size, 3) + sizes) , generator=_lowerCAmelCase , device=_lowerCAmelCase )
return dummy_input
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ = {
'in_channels': 32,
'out_channels': 32,
'temb_channels': 128,
}
if self.block_type == "up":
SCREAMING_SNAKE_CASE_ = 32
if self.block_type == "mid":
init_dict.pop('out_channels' )
SCREAMING_SNAKE_CASE_ = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : Optional[Any] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = self.block_class(**_lowerCAmelCase )
unet_block.to(_lowerCAmelCase )
unet_block.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = unet_block(**_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = output[0]
self.assertEqual(output.shape , self.output_shape )
SCREAMING_SNAKE_CASE_ = output[0, -1, -3:, -3:]
SCREAMING_SNAKE_CASE_ = torch.tensor(_lowerCAmelCase ).to(_lowerCAmelCase )
assert torch_all_close(output_slice.flatten() , _lowerCAmelCase , atol=5E-3 )
@unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' )
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = self.block_class(**_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.train()
SCREAMING_SNAKE_CASE_ = model(**_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = output[0]
SCREAMING_SNAKE_CASE_ = torch.device(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = randn_tensor(output.shape , device=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.nn.functional.mse_loss(_lowerCAmelCase , _lowerCAmelCase )
loss.backward() | 31 | 0 |
'''simple docstring'''
def temp_input_value(min_val: int = 1_0, max_val: int = 1_0_0_0, option: bool = True) -> int:
    """simple docstring"""
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"

    if min_val > max_val:
        raise ValueError('''Invalid value for min_val or max_val (min_value < max_value)''')
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    """simple docstring"""
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    """simple docstring"""
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError('''argument value for lower and higher must be(lower > higher)''')

    if not lower < to_guess < higher:
        raise ValueError(
            '''guess value must be within the range of lower and higher value''')

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print('''started...''')

    last_lowest = lower
    last_highest = higher
    last_numbers = []

    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break

    print(F'''guess the number : {last_numbers[-1]}''')
    print(F'''details : {last_numbers!s}''')
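# Illustrative, non-interactive run (not in the original file); it prints the whole
# bisection trace, so it is left commented out here:
# guess_the_number(-100, 1000, 17)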
def main() -> None:
    """simple docstring"""
    lower = int(input('''Enter lower value : ''').strip())
    higher = int(input('''Enter high value : ''').strip())
    guess = int(input('''Enter value to guess : ''').strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
| 161 |
import operator as op
def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        '^': op.pow,
        '*': op.mul,
        '/': div,
        '+': op.add,
        '-': op.sub,
    }  # operators & their respective operation

    # print table header
    print('Symbol'.center(8), 'Action'.center(12), 'Stack', sep=' | ')
    print('-' * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ('push(' + x + ')').ljust(12), ','.join(stack), sep=' | ')
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print(''.rjust(8), ('pop(' + b + ')').ljust(12), ','.join(stack), sep=' | ')
            a = stack.pop()  # pop stack
            # output in tabular format
            print(''.rjust(8), ('pop(' + a + ')').ljust(12), ','.join(stack), sep=' | ')
            # evaluate the 2 values popped from stack & push result to stack
            stack.append(str(opr[x](int(a), int(b))))
            # output in tabular format
            print(x.rjust(8), ('push(' + a + x + b + ')').ljust(12), ','.join(stack), sep=' | ')
    return int(stack[0])
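# Illustrative check (not in the original file): "2 6 + 3 *" evaluates to
# (2 + 6) * 3 = 24; note that solve() also prints its step table as a side effect.
assert solve("2 6 + 3 *".split(" ")) == 24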
if __name__ == "__main__":
lowerCamelCase__ : Tuple = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ')
print('\n\tResult = ', solve(Postfix)) | 31 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_autoformer"] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 650 |
def sylvester(number: int) -> int:
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
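# Illustrative check (not in the original file): the first terms of Sylvester's
# sequence are 2, 3, 7, 43, 1807.
assert [sylvester(n) for n in range(1, 6)] == [2, 3, 7, 43, 1807]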
if __name__ == "__main__":
print(f'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''') | 31 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : List[str] = {
'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin, PretrainedConfig):
    '''simple docstring'''

    model_type = 'nat'

    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=6_4,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 1_6],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.0_2,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ['''stem'''] + [F"""stage{idx}""" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
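# A quick construction check (illustrative, not part of the original file): with the
# defaults above (embed_dim=64, four stages), the derived hidden_size is 64 * 2**3 = 512.
if __name__ == "__main__":
    config = NatConfig()
    assert config.hidden_size == 512
    assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]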
| 661 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..t5.tokenization_t5 import T5Tokenizer
else:
from ...utils.dummy_sentencepiece_objects import T5Tokenizer
MT5Tokenizer = T5Tokenizer
if is_tokenizers_available():
from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
from ...utils.dummy_tokenizers_objects import T5TokenizerFast
MT5TokenizerFast = T5TokenizerFast
_import_structure = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_mt5'] = [
'MT5EncoderModel',
'MT5ForConditionalGeneration',
'MT5ForQuestionAnswering',
'MT5Model',
'MT5PreTrainedModel',
'MT5Stack',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_mt5'] = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_flax_mt5'] = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model']
if TYPE_CHECKING:
from .configuration_mt5 import MT5Config, MT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mt5 import (
MT5EncoderModel,
MT5ForConditionalGeneration,
MT5ForQuestionAnswering,
MT5Model,
MT5PreTrainedModel,
MT5Stack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model
else:
import sys
sys.modules[__name__] = _LazyModule(
__name__,
globals()['__file__'],
_import_structure,
extra_objects={'MT5Tokenizer': MT5Tokenizer, 'MT5TokenizerFast': MT5TokenizerFast},
module_spec=__spec__,
) | 31 | 0 |
"""simple docstring"""
from functools import reduce
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution(n: str = N) -> int:
    """simple docstring"""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12))
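# Known result (the assertion itself is not in the original file): for the embedded
# 1000-digit number, Project Euler problem 8 gives 23514624000.
assert solution() == 23514624000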
if __name__ == "__main__":
print(f'''{solution() = }''')
| 77 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@require_torch
def lowerCAmelCase_ ( self : int ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
SCREAMING_SNAKE_CASE_ = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
SCREAMING_SNAKE_CASE_ = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
SCREAMING_SNAKE_CASE_ = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
SCREAMING_SNAKE_CASE_ = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(_lowerCAmelCase )
BertModel.from_pretrained(_lowerCAmelCase )
BertTokenizer.from_pretrained(_lowerCAmelCase )
pipeline(task='fill-mask' , model=_lowerCAmelCase )
# baseline - just load from_pretrained with normal network
SCREAMING_SNAKE_CASE_ = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
SCREAMING_SNAKE_CASE_ = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
SCREAMING_SNAKE_CASE_ = '1'
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def lowerCAmelCase_ ( self : Tuple ):
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
SCREAMING_SNAKE_CASE_ = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
SCREAMING_SNAKE_CASE_ = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
SCREAMING_SNAKE_CASE_ = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
SCREAMING_SNAKE_CASE_ = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(_lowerCAmelCase )
BertModel.from_pretrained(_lowerCAmelCase )
BertTokenizer.from_pretrained(_lowerCAmelCase )
pipeline(task='fill-mask' , model=_lowerCAmelCase )
# baseline - just load from_pretrained with normal network
SCREAMING_SNAKE_CASE_ = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
SCREAMING_SNAKE_CASE_ = self.get_env()
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def lowerCAmelCase_ ( self : List[str] ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
SCREAMING_SNAKE_CASE_ = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
SCREAMING_SNAKE_CASE_ = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
SCREAMING_SNAKE_CASE_ = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
# baseline - just load from_pretrained with normal network
SCREAMING_SNAKE_CASE_ = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
SCREAMING_SNAKE_CASE_ = self.get_env()
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
# next emulate no network
SCREAMING_SNAKE_CASE_ = [sys.executable, '-c', '\n'.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
SCREAMING_SNAKE_CASE_ = '1'
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = '\nfrom transformers import pipeline\n '
SCREAMING_SNAKE_CASE_ = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
SCREAMING_SNAKE_CASE_ = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
SCREAMING_SNAKE_CASE_ = self.get_env()
SCREAMING_SNAKE_CASE_ = '1'
SCREAMING_SNAKE_CASE_ = [sys.executable, '-c', '\n'.join([load, mock, run] )]
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'You cannot infer task automatically within `pipeline` when using offline mode' , result.stderr.decode().replace('\n' , '' ) , )
@require_torch
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ = '\nfrom transformers import AutoModel\n '
SCREAMING_SNAKE_CASE_ = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '
# baseline - just load from_pretrained with normal network
SCREAMING_SNAKE_CASE_ = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
SCREAMING_SNAKE_CASE_ = self.get_env()
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
SCREAMING_SNAKE_CASE_ = '1'
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() ) | 31 | 0 |
def set_bit(number: int, position: int) -> int:
    """simple docstring"""
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """simple docstring"""
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """simple docstring"""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """simple docstring"""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """simple docstring"""
    return int((number & (1 << position)) != 0)
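# Illustrative checks (not in the original file), all on 0b1101 (decimal 13):
assert set_bit(0b1101, 1) == 0b1111  # 13 -> 15
assert clear_bit(0b1101, 2) == 0b1001  # 13 -> 9
assert flip_bit(0b1101, 0) == 0b1100  # 13 -> 12
assert is_bit_set(0b1101, 3) is True
assert get_bit(0b1101, 1) == 0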
if __name__ == "__main__":
import doctest
doctest.testmod()
| 202 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    '''simple docstring'''

    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1_024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    '''simple docstring'''

    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims)

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs | 31 | 0 |
'''simple docstring'''
def solution(max_base: int = 10, max_power: int = 22) -> int:
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power)
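# Known result (the assertion itself is not in the original file): Project Euler
# problem 63 counts exactly 49 n-digit integers that are also an nth power.
assert solution() == 49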
if __name__ == "__main__":
print(F'''{solution(10, 22) = }''')
| 325 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = (32, 32)
SCREAMING_SNAKE_CASE_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_lowerCAmelCase )
return image
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
return model
@property
def lowerCAmelCase_ ( self : Tuple ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def lowerCAmelCase_ ( self : Optional[int] ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_006 , )
return RobertaSeriesModelWithTransformation(_lowerCAmelCase )
@property
def lowerCAmelCase_ ( self : List[Any] ):
def extract(*_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : str ):
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : str ):
SCREAMING_SNAKE_CASE_ = torch.ones([0] )
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : int ):
self.pixel_values.to(_lowerCAmelCase )
return self
return Out()
return extract
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_ = self.dummy_cond_unet
SCREAMING_SNAKE_CASE_ = PNDMScheduler(skip_prk_steps=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.dummy_vae
SCREAMING_SNAKE_CASE_ = self.dummy_text_encoder
SCREAMING_SNAKE_CASE_ = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
SCREAMING_SNAKE_CASE_ = 77
SCREAMING_SNAKE_CASE_ = self.dummy_image.to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE_ = AltDiffusionImgaImgPipeline(
unet=_lowerCAmelCase , scheduler=_lowerCAmelCase , vae=_lowerCAmelCase , text_encoder=_lowerCAmelCase , tokenizer=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = alt_pipe.to(_lowerCAmelCase )
alt_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = 'A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE_ = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ = alt_pipe(
[prompt] , generator=_lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = output.images
SCREAMING_SNAKE_CASE_ = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ = alt_pipe(
[prompt] , generator=_lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=_lowerCAmelCase , return_dict=_lowerCAmelCase , )[0]
SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def lowerCAmelCase_ ( self : Tuple ):
SCREAMING_SNAKE_CASE_ = self.dummy_cond_unet
SCREAMING_SNAKE_CASE_ = PNDMScheduler(skip_prk_steps=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.dummy_vae
SCREAMING_SNAKE_CASE_ = self.dummy_text_encoder
SCREAMING_SNAKE_CASE_ = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
SCREAMING_SNAKE_CASE_ = 77
SCREAMING_SNAKE_CASE_ = self.dummy_image.to(_lowerCAmelCase )
# put models in fp16
SCREAMING_SNAKE_CASE_ = unet.half()
SCREAMING_SNAKE_CASE_ = vae.half()
SCREAMING_SNAKE_CASE_ = bert.half()
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE_ = AltDiffusionImgaImgPipeline(
unet=_lowerCAmelCase , scheduler=_lowerCAmelCase , vae=_lowerCAmelCase , text_encoder=_lowerCAmelCase , tokenizer=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = alt_pipe.to(_lowerCAmelCase )
alt_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = 'A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = alt_pipe(
[prompt] , generator=_lowerCAmelCase , num_inference_steps=2 , output_type='np' , image=_lowerCAmelCase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
# resize to resolution that is divisible by 8 but not 16 or 32
SCREAMING_SNAKE_CASE_ = init_image.resize((760, 504) )
SCREAMING_SNAKE_CASE_ = 'BAAI/AltDiffusion'
SCREAMING_SNAKE_CASE_ = AltDiffusionImgaImgPipeline.from_pretrained(
_lowerCAmelCase , safety_checker=_lowerCAmelCase , )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE_ = 'A fantasy landscape, trending on artstation'
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe(
prompt=_lowerCAmelCase , image=_lowerCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=_lowerCAmelCase , output_type='np' , )
SCREAMING_SNAKE_CASE_ = output.images[0]
SCREAMING_SNAKE_CASE_ = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self : int ):
SCREAMING_SNAKE_CASE_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
SCREAMING_SNAKE_CASE_ = init_image.resize((768, 512) )
SCREAMING_SNAKE_CASE_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy' )
SCREAMING_SNAKE_CASE_ = 'BAAI/AltDiffusion'
SCREAMING_SNAKE_CASE_ = AltDiffusionImgaImgPipeline.from_pretrained(
_lowerCAmelCase , safety_checker=_lowerCAmelCase , )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE_ = 'A fantasy landscape, trending on artstation'
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe(
prompt=_lowerCAmelCase , image=_lowerCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=_lowerCAmelCase , output_type='np' , )
SCREAMING_SNAKE_CASE_ = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2 | 31 | 0 |
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int):
    '''simple docstring'''
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open('''w''').write('''\n'''.join(new))
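# Illustrative CLI usage via fire (hypothetical paths and script name, not from the
# original file); this would keep the first 5 lines of every file under data/full/:
#   python minify.py data/full/ data/tiny/ 5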
if __name__ == "__main__":
fire.Fire(minify) | 30 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
lowerCamelCase__ : Optional[int] = logging.get_logger(__name__)
lowerCamelCase__ : Dict = {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json',
'allenai/longformer-large-4096': 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json',
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'
),
}
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "longformer"
def __init__( self : Union[str, Any] , _lowerCAmelCase : Union[List[int], int] = 512 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 1 , _lowerCAmelCase : int = 0 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 30_522 , _lowerCAmelCase : int = 768 , _lowerCAmelCase : int = 12 , _lowerCAmelCase : int = 12 , _lowerCAmelCase : int = 3_072 , _lowerCAmelCase : str = "gelu" , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : int = 512 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : float = 0.02 , _lowerCAmelCase : float = 1E-12 , _lowerCAmelCase : bool = False , **_lowerCAmelCase : Union[str, Any] , ):
super().__init__(pad_token_id=_lowerCAmelCase , **_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = attention_window
SCREAMING_SNAKE_CASE_ = sep_token_id
SCREAMING_SNAKE_CASE_ = bos_token_id
SCREAMING_SNAKE_CASE_ = eos_token_id
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = type_vocab_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = onnx_export
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Optional[Any] , _lowerCAmelCase : "PretrainedConfig" , _lowerCAmelCase : str = "default" , _lowerCAmelCase : "List[PatchingSpec]" = None ):
super().__init__(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = True
@property
def lowerCAmelCase_ ( self : Any ):
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_ = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
SCREAMING_SNAKE_CASE_ = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('global_attention_mask', dynamic_axis),
] )
@property
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ = super().outputs
if self.task == "default":
SCREAMING_SNAKE_CASE_ = {0: 'batch'}
return outputs
@property
def lowerCAmelCase_ ( self : str ):
return 1E-4
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14 )
def lowerCAmelCase_ ( self : str , _lowerCAmelCase : "PreTrainedTokenizerBase" , _lowerCAmelCase : int = -1 , _lowerCAmelCase : int = -1 , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[TensorType] = None , ):
SCREAMING_SNAKE_CASE_ = super().generate_dummy_inputs(
preprocessor=_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
SCREAMING_SNAKE_CASE_ = torch.zeros_like(inputs['input_ids'] )
# make every second token global
SCREAMING_SNAKE_CASE_ = 1
return inputs | 31 | 0 |
'''simple docstring'''
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class lowercase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
A_ = DownBlockaD # noqa F405
A_ = "down"
def _lowerCAmelCase ( self : Tuple ):
snake_case__ : List[Any] = [-0.0_2_3_2, -0.9_8_6_9, 0.8_0_5_4, -0.0_6_3_7, -0.1_6_8_8, -1.4_2_6_4, 0.4_4_7_0, -1.3_3_9_4, 0.0_9_0_4]
super().test_output(_lowerCAmelCase )
class lowercase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
A_ = ResnetDownsampleBlockaD # noqa F405
A_ = "down"
def _lowerCAmelCase ( self : List[Any] ):
snake_case__ : Union[str, Any] = [0.0_7_1_0, 0.2_4_1_0, -0.7_3_2_0, -1.0_7_5_7, -1.1_3_4_3, 0.3_5_4_0, -0.0_1_3_3, -0.2_5_7_6, 0.0_9_4_8]
super().test_output(_lowerCAmelCase )
class lowercase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
A_ = AttnDownBlockaD # noqa F405
A_ = "down"
def _lowerCAmelCase ( self : List[Any] ):
snake_case__ : str = [0.0_6_3_6, 0.8_9_6_4, -0.6_2_3_4, -1.0_1_3_1, 0.0_8_4_4, 0.4_9_3_5, 0.3_4_3_7, 0.0_9_1_1, -0.2_9_5_7]
super().test_output(_lowerCAmelCase )
class lowercase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
A_ = CrossAttnDownBlockaD # noqa F405
A_ = "down"
def _lowerCAmelCase ( self : Tuple ):
snake_case__ , snake_case__ : List[Any] = super().prepare_init_args_and_inputs_for_common()
snake_case__ : Any = 32
return init_dict, inputs_dict
def _lowerCAmelCase ( self : Dict ):
snake_case__ : Tuple = [0.2_2_3_8, -0.7_3_9_6, -0.2_2_5_5, -0.3_8_2_9, 0.1_9_2_5, 1.1_6_6_5, 0.0_6_0_3, -0.7_2_9_5, 0.1_9_8_3]
super().test_output(_lowerCAmelCase )
class lowercase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
A_ = SimpleCrossAttnDownBlockaD # noqa F405
A_ = "down"
@property
def _lowerCAmelCase ( self : Optional[int] ):
return super().get_dummy_input(include_encoder_hidden_states=_lowerCAmelCase )
def _lowerCAmelCase ( self : Any ):
snake_case__ , snake_case__ : int = super().prepare_init_args_and_inputs_for_common()
snake_case__ : Tuple = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == 'mps' , 'MPS result is not consistent' )
def _lowerCAmelCase ( self : Union[str, Any] ):
snake_case__ : Dict = [0.7_9_2_1, -0.0_9_9_2, -0.1_9_6_2, -0.7_6_9_5, -0.4_2_4_2, 0.7_8_0_4, 0.4_7_3_7, 0.2_7_6_5, 0.3_3_3_8]
super().test_output(_lowerCAmelCase )
class lowercase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
A_ = SkipDownBlockaD # noqa F405
A_ = "down"
@property
def _lowerCAmelCase ( self : Optional[Any] ):
return super().get_dummy_input(include_skip_sample=_lowerCAmelCase )
def _lowerCAmelCase ( self : Dict ):
snake_case__ : int = [-0.0_8_4_5, -0.2_0_8_7, -0.2_4_6_5, 0.0_9_7_1, 0.1_9_0_0, -0.0_4_8_4, 0.2_6_6_4, 0.4_1_7_9, 0.5_0_6_9]
super().test_output(_lowerCAmelCase )
class lowercase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
A_ = AttnSkipDownBlockaD # noqa F405
A_ = "down"
@property
def _lowerCAmelCase ( self : int ):
return super().get_dummy_input(include_skip_sample=_lowerCAmelCase )
def _lowerCAmelCase ( self : Any ):
snake_case__ : List[Any] = [0.5_5_3_9, 0.1_6_0_9, 0.4_9_2_4, 0.0_5_3_7, -0.1_9_9_5, 0.4_0_5_0, 0.0_9_7_9, -0.2_7_2_1, -0.0_6_4_2]
super().test_output(_lowerCAmelCase )
class lowercase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
A_ = DownEncoderBlockaD # noqa F405
A_ = "down"
@property
def _lowerCAmelCase ( self : List[str] ):
return super().get_dummy_input(include_temb=_lowerCAmelCase )
def _lowerCAmelCase ( self : Union[str, Any] ):
snake_case__ : Optional[int] = {
'in_channels': 32,
'out_channels': 32,
}
snake_case__ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def _lowerCAmelCase ( self : Optional[Any] ):
snake_case__ : Dict = [1.1_1_0_2, 0.5_3_0_2, 0.4_8_7_2, -0.0_0_2_3, -0.8_0_4_2, 0.0_4_8_3, -0.3_4_8_9, -0.5_6_3_2, 0.7_6_2_6]
super().test_output(_lowerCAmelCase )
class lowercase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
A_ = AttnDownEncoderBlockaD # noqa F405
A_ = "down"
@property
def _lowerCAmelCase ( self : Union[str, Any] ):
return super().get_dummy_input(include_temb=_lowerCAmelCase )
def _lowerCAmelCase ( self : Tuple ):
snake_case__ : str = {
'in_channels': 32,
'out_channels': 32,
}
snake_case__ : List[str] = self.dummy_input
return init_dict, inputs_dict
def _lowerCAmelCase ( self : Dict ):
snake_case__ : Dict = [0.8_9_6_6, -0.1_4_8_6, 0.8_5_6_8, 0.8_1_4_1, -0.9_0_4_6, -0.1_3_4_2, -0.0_9_7_2, -0.7_4_1_7, 0.1_5_3_8]
super().test_output(_lowerCAmelCase )
class lowercase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
A_ = UNetMidBlockaD # noqa F405
A_ = "mid"
def _lowerCAmelCase ( self : Optional[int] ):
snake_case__ : int = {
'in_channels': 32,
'temb_channels': 128,
}
snake_case__ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def _lowerCAmelCase ( self : Any ):
snake_case__ : Optional[int] = [-0.1_0_6_2, 1.7_2_4_8, 0.3_4_9_4, 1.4_5_6_9, -0.0_9_1_0, -1.2_4_2_1, -0.9_9_8_4, 0.6_7_3_6, 1.0_0_2_8]
super().test_output(_lowerCAmelCase )
class lowercase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
A_ = UNetMidBlockaDCrossAttn # noqa F405
A_ = "mid"
def _lowerCAmelCase ( self : Optional[int] ):
snake_case__ , snake_case__ : Tuple = super().prepare_init_args_and_inputs_for_common()
snake_case__ : Optional[Any] = 32
return init_dict, inputs_dict
def _lowerCAmelCase ( self : List[Any] ):
snake_case__ : Optional[int] = [0.0_1_8_7, 2.4_2_2_0, 0.4_4_8_4, 1.1_2_0_3, -0.6_1_2_1, -1.5_1_2_2, -0.8_2_7_0, 0.7_8_5_1, 1.8_3_3_5]
super().test_output(_lowerCAmelCase )
class lowercase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
A_ = UNetMidBlockaDSimpleCrossAttn # noqa F405
A_ = "mid"
@property
def _lowerCAmelCase ( self : List[Any] ):
return super().get_dummy_input(include_encoder_hidden_states=_lowerCAmelCase )
def _lowerCAmelCase ( self : Any ):
snake_case__ , snake_case__ : int = super().prepare_init_args_and_inputs_for_common()
snake_case__ : Tuple = 32
return init_dict, inputs_dict
def _lowerCAmelCase ( self : Optional[int] ):
snake_case__ : Any = [0.7_1_4_3, 1.9_9_7_4, 0.5_4_4_8, 1.3_9_7_7, 0.1_2_8_2, -1.1_2_3_7, -1.4_2_3_8, 0.5_5_3_0, 0.8_8_8_0]
super().test_output(_lowerCAmelCase )
class lowercase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
A_ = UpBlockaD # noqa F405
A_ = "up"
@property
def _lowerCAmelCase ( self : List[Any] ):
return super().get_dummy_input(include_res_hidden_states_tuple=_lowerCAmelCase )
def _lowerCAmelCase ( self : List[Any] ):
snake_case__ : Dict = [-0.2_0_4_1, -0.4_1_6_5, -0.3_0_2_2, 0.0_0_4_1, -0.6_6_2_8, -0.7_0_5_3, 0.1_9_2_8, -0.0_3_2_5, 0.0_5_2_3]
super().test_output(_lowerCAmelCase )
class lowercase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
A_ = ResnetUpsampleBlockaD # noqa F405
A_ = "up"
@property
def _lowerCAmelCase ( self : Union[str, Any] ):
return super().get_dummy_input(include_res_hidden_states_tuple=_lowerCAmelCase )
def _lowerCAmelCase ( self : Optional[Any] ):
snake_case__ : int = [0.2_2_8_7, 0.3_5_4_9, -0.1_3_4_6, 0.4_7_9_7, -0.1_7_1_5, -0.9_6_4_9, 0.7_3_0_5, -0.5_8_6_4, -0.6_2_4_4]
super().test_output(_lowerCAmelCase )
class lowercase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
A_ = CrossAttnUpBlockaD # noqa F405
A_ = "up"
@property
def _lowerCAmelCase ( self : int ):
return super().get_dummy_input(include_res_hidden_states_tuple=_lowerCAmelCase )
def _lowerCAmelCase ( self : List[str] ):
snake_case__ , snake_case__ : Any = super().prepare_init_args_and_inputs_for_common()
snake_case__ : List[Any] = 32
return init_dict, inputs_dict
def _lowerCAmelCase ( self : Optional[int] ):
snake_case__ : int = [-0.1_4_0_3, -0.3_5_1_5, -0.0_4_2_0, -0.1_4_2_5, 0.3_1_6_7, 0.5_0_9_4, -0.2_1_8_1, 0.5_9_3_1, 0.5_5_8_2]
super().test_output(_lowerCAmelCase )
class lowercase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
A_ = SimpleCrossAttnUpBlockaD # noqa F405
A_ = "up"
@property
def _lowerCAmelCase ( self : Tuple ):
return super().get_dummy_input(include_res_hidden_states_tuple=_lowerCAmelCase , include_encoder_hidden_states=_lowerCAmelCase )
def _lowerCAmelCase ( self : Dict ):
snake_case__ , snake_case__ : Tuple = super().prepare_init_args_and_inputs_for_common()
snake_case__ : Union[str, Any] = 32
return init_dict, inputs_dict
def _lowerCAmelCase ( self : Union[str, Any] ):
snake_case__ : Optional[Any] = [0.2_6_4_5, 0.1_4_8_0, 0.0_9_0_9, 0.8_0_4_4, -0.9_7_5_8, -0.9_0_8_3, 0.0_9_9_4, -1.1_4_5_3, -0.7_4_0_2]
super().test_output(_lowerCAmelCase )
class lowercase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
A_ = AttnUpBlockaD # noqa F405
A_ = "up"
@property
def _lowerCAmelCase ( self : Optional[Any] ):
return super().get_dummy_input(include_res_hidden_states_tuple=_lowerCAmelCase )
@unittest.skipIf(torch_device == 'mps' , 'MPS result is not consistent' )
def _lowerCAmelCase ( self : Any ):
snake_case__ : Any = [0.0_9_7_9, 0.1_3_2_6, 0.0_0_2_1, 0.0_6_5_9, 0.2_2_4_9, 0.0_0_5_9, 0.1_1_3_2, 0.5_9_5_2, 0.1_0_3_3]
super().test_output(_lowerCAmelCase )
class lowercase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
A_ = SkipUpBlockaD # noqa F405
A_ = "up"
@property
def _lowerCAmelCase ( self : Union[str, Any] ):
return super().get_dummy_input(include_res_hidden_states_tuple=_lowerCAmelCase )
def _lowerCAmelCase ( self : Tuple ):
snake_case__ : Tuple = [-0.0_8_9_3, -0.1_2_3_4, -0.1_5_0_6, -0.0_3_3_2, 0.0_1_2_3, -0.0_2_1_1, 0.0_5_6_6, 0.0_1_4_3, 0.0_3_6_2]
super().test_output(_lowerCAmelCase )
class lowercase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
A_ = AttnSkipUpBlockaD # noqa F405
A_ = "up"
@property
def _lowerCAmelCase ( self : Dict ):
return super().get_dummy_input(include_res_hidden_states_tuple=_lowerCAmelCase )
def _lowerCAmelCase ( self : Dict ):
snake_case__ : Optional[int] = [0.0_3_6_1, 0.0_6_1_7, 0.2_7_8_7, -0.0_3_5_0, 0.0_3_4_2, 0.3_4_2_1, -0.0_8_4_3, 0.0_9_1_3, 0.3_0_1_5]
super().test_output(_lowerCAmelCase )
class lowercase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
A_ = UpDecoderBlockaD # noqa F405
A_ = "up"
@property
def _lowerCAmelCase ( self : List[str] ):
return super().get_dummy_input(include_temb=_lowerCAmelCase )
def _lowerCAmelCase ( self : Any ):
snake_case__ : Any = {'in_channels': 32, 'out_channels': 32}
snake_case__ : str = self.dummy_input
return init_dict, inputs_dict
def _lowerCAmelCase ( self : Optional[Any] ):
snake_case__ : Dict = [0.4_4_0_4, 0.1_9_9_8, -0.9_8_8_6, -0.3_3_2_0, -0.3_1_2_8, -0.7_0_3_4, -0.6_9_5_5, -0.2_3_3_8, -0.3_1_3_7]
super().test_output(_lowerCAmelCase )
class lowercase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
A_ = AttnUpDecoderBlockaD # noqa F405
A_ = "up"
@property
def _lowerCAmelCase ( self : Optional[int] ):
return super().get_dummy_input(include_temb=_lowerCAmelCase )
def _lowerCAmelCase ( self : Dict ):
snake_case__ : List[Any] = {'in_channels': 32, 'out_channels': 32}
snake_case__ : str = self.dummy_input
return init_dict, inputs_dict
def _lowerCAmelCase ( self : int ):
snake_case__ : Any = [0.6_7_3_8, 0.4_4_9_1, 0.1_0_5_5, 1.0_7_1_0, 0.7_3_1_6, 0.3_3_3_9, 0.3_3_5_2, 0.1_0_2_3, 0.3_5_6_8]
super().test_output(_lowerCAmelCase )
| 270 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
lowerCamelCase__ : Optional[int] = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor):
    '''simple docstring'''

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use MobileViTImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs) | 31 | 0 |
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=1_00)
def partition(number_to_partition: int) -> set[int]:
    """simple docstring"""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 50_00) -> int | None:
    """simple docstring"""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
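# Known result (the assertion itself is not in the original file): 71 is the first
# value expressible as a sum of primes in over five thousand ways (Project Euler 77).
assert solution() == 71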
if __name__ == "__main__":
print(f"""{solution() = }""")
| 591 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Optional[int] = logging.get_logger(__name__)
lowerCamelCase__ : Tuple = {
'microsoft/swinv2-tiny-patch4-window8-256': (
'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
),
}
class Swinv2Config(PretrainedConfig):
    '''simple docstring'''

    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0) | 31 | 0 |
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    with parallel_backend('spark'):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend('unsupported backend'):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend('unsupported backend'):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('num_proc', [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {'a': 1, 'b': 2}
    s3 = {'a': [1, 2], 'b': [3, 4]}
    s4 = {'a': {'1': 1}, 'b': 2}
    s5 = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {'a': 2, 'b': 3}
    expected_map_nested_s3 = {'a': [2, 3], 'b': [4, 5]}
    expected_map_nested_s4 = {'a': {'1': 2}, 'b': 3}
    expected_map_nested_s5 = {'a': 2, 'b': 3, 'c': 4, 'd': 5}
    with parallel_backend('spark'):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
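# A minimal usage sketch of the API exercised by these tests: `map_nested` applies a
# function over nested containers; wrapping the call in `parallel_backend("spark")`
# (as above) would dispatch the same work to joblib-spark instead of running it inline.
from datasets.utils.py_utils import map_nested

def add_one(i):
    return i + 1

print(map_nested(add_one, {"a": [1, 2], "b": [3, 4]}))  # {'a': [2, 3], 'b': [4, 5]}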
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list( shape , scale=1.0 , rng=None , name=None ):  # parameter and function names restored: the body and the floats_list(...) call sites below use them
if rng is None:
SCREAMING_SNAKE_CASE_ = global_rng
SCREAMING_SNAKE_CASE_ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
class SpeechTaFeatureExtractionTester ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[str] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any]=7 , _lowerCAmelCase : Union[str, Any]=400 , _lowerCAmelCase : Tuple=2_000 , _lowerCAmelCase : str=1 , _lowerCAmelCase : int=0.0 , _lowerCAmelCase : Optional[Any]=16_000 , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Any=80 , _lowerCAmelCase : Union[str, Any]=16 , _lowerCAmelCase : List[str]=64 , _lowerCAmelCase : List[Any]="hann_window" , _lowerCAmelCase : Any=80 , _lowerCAmelCase : List[Any]=7_600 , _lowerCAmelCase : List[Any]=1E-10 , _lowerCAmelCase : Optional[Any]=True , ):
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = min_seq_length
SCREAMING_SNAKE_CASE_ = max_seq_length
SCREAMING_SNAKE_CASE_ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE_ = feature_size
SCREAMING_SNAKE_CASE_ = padding_value
SCREAMING_SNAKE_CASE_ = sampling_rate
SCREAMING_SNAKE_CASE_ = do_normalize
SCREAMING_SNAKE_CASE_ = num_mel_bins
SCREAMING_SNAKE_CASE_ = hop_length
SCREAMING_SNAKE_CASE_ = win_length
SCREAMING_SNAKE_CASE_ = win_function
SCREAMING_SNAKE_CASE_ = fmin
SCREAMING_SNAKE_CASE_ = fmax
SCREAMING_SNAKE_CASE_ = mel_floor
SCREAMING_SNAKE_CASE_ = return_attention_mask
def lowerCAmelCase_ ( self : Union[str, Any] ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : Optional[int]=False , _lowerCAmelCase : str=False ):
def _flatten(_lowerCAmelCase : Dict ):
return list(itertools.chain(*_lowerCAmelCase ) )
if equal_length:
SCREAMING_SNAKE_CASE_ = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE_ = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE_ = [np.asarray(_lowerCAmelCase ) for x in speech_inputs]
return speech_inputs
def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : Union[str, Any]=False , _lowerCAmelCase : Optional[int]=False ):
if equal_length:
SCREAMING_SNAKE_CASE_ = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE_ = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE_ = [np.asarray(_lowerCAmelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
class SpeechTaFeatureExtractionTest ( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
'''simple docstring'''
lowercase_ = SpeechTaFeatureExtractor
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = SpeechTaFeatureExtractionTester(self )
def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : int ):
self.assertTrue(np.all(np.mean(_lowerCAmelCase , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(_lowerCAmelCase , axis=0 ) - 1 ) < 1E-3 ) )
def lowerCAmelCase_ ( self : List[Any] ):
# Tests that all call wrap to encode_plus and batch_encode_plus
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE_ = [np.asarray(_lowerCAmelCase ) for speech_input in speech_inputs]
# Test not batched input
SCREAMING_SNAKE_CASE_ = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE_ = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
# Test batched
SCREAMING_SNAKE_CASE_ = feat_extract(_lowerCAmelCase , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE_ = feat_extract(_lowerCAmelCase , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE_ = ['longest', 'max_length', 'do_not_pad']
SCREAMING_SNAKE_CASE_ = [None, 1_600, None]
for max_length, padding in zip(_lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = feat_extract(_lowerCAmelCase , padding=_lowerCAmelCase , max_length=_lowerCAmelCase , return_tensors='np' )
SCREAMING_SNAKE_CASE_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
            self.assertTrue(input_values[1][1_000:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE_ = range(800 , 1_400 , 200 )
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in lengths]
SCREAMING_SNAKE_CASE_ = ['longest', 'max_length', 'do_not_pad']
SCREAMING_SNAKE_CASE_ = [None, 1_600, None]
for max_length, padding in zip(_lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = feat_extract(_lowerCAmelCase , max_length=_lowerCAmelCase , padding=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
def lowerCAmelCase_ ( self : Dict ):
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE_ = feat_extract(
_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=1_000 , padding='max_length' , return_tensors='np' )
SCREAMING_SNAKE_CASE_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def lowerCAmelCase_ ( self : Dict ):
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE_ = feat_extract(
_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=1_000 , padding='longest' , return_tensors='np' )
SCREAMING_SNAKE_CASE_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_000) )
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE_ = feat_extract(
_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=2_000 , padding='longest' , return_tensors='np' )
SCREAMING_SNAKE_CASE_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_200) )
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        SCREAMING_SNAKE_CASE_ = np.random.rand(100 ).astype(np.float32 )
        SCREAMING_SNAKE_CASE_ = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            SCREAMING_SNAKE_CASE_ = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
            self.assertTrue(np_processed.input_values.dtype == np.float32 )
            SCREAMING_SNAKE_CASE_ = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
            self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
def lowerCAmelCase_ ( self : Tuple ):
# Tests that all call wrap to encode_plus and batch_encode_plus
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE_ = [np.asarray(_lowerCAmelCase ) for speech_input in speech_inputs]
# Test feature size
SCREAMING_SNAKE_CASE_ = feature_extractor(audio_target=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors='np' ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
SCREAMING_SNAKE_CASE_ = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE_ = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
# Test batched
SCREAMING_SNAKE_CASE_ = feature_extractor(_lowerCAmelCase , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE_ = feature_extractor(_lowerCAmelCase , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
SCREAMING_SNAKE_CASE_ = np.asarray(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = feature_extractor(_lowerCAmelCase , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE_ = feature_extractor(_lowerCAmelCase , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE_ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_lowerCAmelCase ) == len(_lowerCAmelCase ) for x, y in zip(_lowerCAmelCase , processed_features[input_name] ) ) )
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
SCREAMING_SNAKE_CASE_ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE_ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def lowerCAmelCase_ ( self : Dict ):
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE_ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
SCREAMING_SNAKE_CASE_ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE_ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def lowerCAmelCase_ ( self : Dict ):
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE_ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE_ = feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE_ = feat_extract.pad(_lowerCAmelCase , padding='longest' , return_tensors='np' )[input_name]
SCREAMING_SNAKE_CASE_ = feat_extract.pad(_lowerCAmelCase , padding='longest' , return_tensors='pt' )[input_name]
        self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_pt.numpy().astype(np.float32 ).sum() ) < 1E-2 )
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ = self.feat_extract_dict
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE_ = [len(_lowerCAmelCase ) for x in speech_inputs]
SCREAMING_SNAKE_CASE_ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE_ = feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE_ = feat_extract.pad(_lowerCAmelCase , padding='longest' , return_tensors='np' )
self.assertIn('attention_mask' , _lowerCAmelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _lowerCAmelCase )
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = self.feat_extract_dict
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE_ = [len(_lowerCAmelCase ) for x in speech_inputs]
SCREAMING_SNAKE_CASE_ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE_ = min(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE_ = feat_extract.pad(
_lowerCAmelCase , padding='max_length' , max_length=_lowerCAmelCase , truncation=_lowerCAmelCase , return_tensors='np' )
self.assertIn('attention_mask' , _lowerCAmelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : Tuple ):
from datasets import load_dataset
SCREAMING_SNAKE_CASE_ = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
SCREAMING_SNAKE_CASE_ = ds.sort('id' ).select(range(_lowerCAmelCase ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def lowerCAmelCase_ ( self : Any ):
# fmt: off
SCREAMING_SNAKE_CASE_ = torch.tensor(
[2.3_804E-03, 2.0_752E-03, 1.9_836E-03, 2.1_057E-03, 1.6_174E-03,
3.0_518E-04, 9.1_553E-05, 3.3_569E-04, 9.7_656E-04, 1.8_311E-03,
2.0_142E-03, 2.1_057E-03, 1.7_395E-03, 4.5_776E-04, -3.9_673E-04,
4.5_776E-04, 1.0_071E-03, 9.1_553E-05, 4.8_828E-04, 1.1_597E-03,
7.3_242E-04, 9.4_604E-04, 1.8_005E-03, 1.8_311E-03, 8.8_501E-04,
4.2_725E-04, 4.8_828E-04, 7.3_242E-04, 1.0_986E-03, 2.1_057E-03] )
# fmt: on
SCREAMING_SNAKE_CASE_ = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE_ = SpeechTaFeatureExtractor()
SCREAMING_SNAKE_CASE_ = feature_extractor(_lowerCAmelCase , return_tensors='pt' ).input_values
        self.assertEqual(input_values.shape , (1, 93_680) )
self.assertTrue(torch.allclose(input_values[0, :30] , _lowerCAmelCase , atol=1E-6 ) )
def lowerCAmelCase_ ( self : Optional[int] ):
# fmt: off
SCREAMING_SNAKE_CASE_ = torch.tensor(
[-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
-3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
-3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
-3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998] )
# fmt: on
SCREAMING_SNAKE_CASE_ = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE_ = SpeechTaFeatureExtractor()
SCREAMING_SNAKE_CASE_ = feature_extractor(audio_target=_lowerCAmelCase , return_tensors='pt' ).input_values
        self.assertEqual(input_values.shape , (1, 366, 80) )
        self.assertTrue(torch.allclose(input_values[0, 0, :30] , _lowerCAmelCase , atol=1E-4 ) )
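# A hedged usage sketch of the two code paths tested above; `SpeechTaFeatureExtractor`
# here corresponds to transformers' SpeechT5FeatureExtractor. Plain audio yields a 1-D
# waveform per example, while `audio_target` yields (frames, 80) log-mel frames.
import numpy as np
from transformers import SpeechT5FeatureExtractor

extractor = SpeechT5FeatureExtractor()
waveform = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
inputs = extractor(waveform, sampling_rate=16_000, return_tensors="np")
targets = extractor(audio_target=waveform, sampling_rate=16_000, return_tensors="np")
print(inputs.input_values.shape)   # (1, 16000)
print(targets.input_values.shape)  # (1, num_frames, 80)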
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)
def list_field (default=None , metadata=None ):
    """simple docstring"""
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class BenchmarkArguments :
"""simple docstring"""
snake_case__ = list_field(
default=[] , metadata={
"""help""": (
"""Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"""
""" of all available models"""
)
} , )
snake_case__ = list_field(
default=[8] , metadata={"""help""": """List of batch sizes for which memory and time performance will be evaluated"""} )
snake_case__ = list_field(
default=[8, 32, 128, 512] , metadata={"""help""": """List of sequence lengths for which memory and time performance will be evaluated"""} , )
snake_case__ = field(
default=_SCREAMING_SNAKE_CASE , metadata={"""help""": """Whether to benchmark inference of model. Inference can be disabled via --no-inference."""} , )
snake_case__ = field(
default=_SCREAMING_SNAKE_CASE , metadata={"""help""": """Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."""} , )
snake_case__ = field(
default=_SCREAMING_SNAKE_CASE , metadata={"""help""": """Whether to run on available tpu devices. TPU can be disabled via --no-tpu."""} )
snake_case__ = field(default=_SCREAMING_SNAKE_CASE , metadata={"""help""": """Use FP16 to accelerate inference."""} )
snake_case__ = field(default=_SCREAMING_SNAKE_CASE , metadata={"""help""": """Benchmark training of model"""} )
snake_case__ = field(default=_SCREAMING_SNAKE_CASE , metadata={"""help""": """Verbose memory tracing"""} )
snake_case__ = field(
default=_SCREAMING_SNAKE_CASE , metadata={"""help""": """Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."""} , )
snake_case__ = field(
default=_SCREAMING_SNAKE_CASE , metadata={
"""help""": """Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"""
} , )
snake_case__ = field(default=_SCREAMING_SNAKE_CASE , metadata={"""help""": """Trace memory line by line"""} )
snake_case__ = field(default=_SCREAMING_SNAKE_CASE , metadata={"""help""": """Save result to a CSV file"""} )
snake_case__ = field(default=_SCREAMING_SNAKE_CASE , metadata={"""help""": """Save all print statements in a log file"""} )
snake_case__ = field(default=_SCREAMING_SNAKE_CASE , metadata={"""help""": """Whether to print environment information"""} )
snake_case__ = field(
default=_SCREAMING_SNAKE_CASE , metadata={
"""help""": (
"""Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"""
""" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"""
""" for debugging / testing and on TPU."""
)
} , )
snake_case__ = field(
default=F'''inference_time_{round(time() )}.csv''' , metadata={"""help""": """CSV filename used if saving time results to csv."""} , )
snake_case__ = field(
default=F'''inference_memory_{round(time() )}.csv''' , metadata={"""help""": """CSV filename used if saving memory results to csv."""} , )
snake_case__ = field(
default=F'''train_time_{round(time() )}.csv''' , metadata={"""help""": """CSV filename used if saving time results to csv for training."""} , )
snake_case__ = field(
default=F'''train_memory_{round(time() )}.csv''' , metadata={"""help""": """CSV filename used if saving memory results to csv for training."""} , )
snake_case__ = field(
default=F'''env_info_{round(time() )}.csv''' , metadata={"""help""": """CSV filename used if saving environment information."""} , )
snake_case__ = field(
default=F'''log_{round(time() )}.csv''' , metadata={"""help""": """Log filename used if print statements are saved in log."""} , )
snake_case__ = field(default=3 , metadata={"""help""": """Times an experiment will be run."""} )
snake_case__ = field(
default=_SCREAMING_SNAKE_CASE , metadata={
"""help""": (
"""Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"""
""" model weights."""
)
} , )
def UpperCAmelCase__ (self: List[str] ) -> int:
'''simple docstring'''
warnings.warn(
f'The class {self.__class__} is deprecated. Hugging Face Benchmarking utils'
" are deprecated in general and it is advised to use external Benchmarking libraries "
" to benchmark Transformer models." , _lowerCAmelCase , )
def UpperCAmelCase__ (self: str ) -> Optional[int]:
'''simple docstring'''
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def UpperCAmelCase__ (self: Dict ) -> Optional[int]:
'''simple docstring'''
if len(self.models ) <= 0:
raise ValueError(
"Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
" bert-base-cased` or `args.models = [\'bert-base-cased\']." )
return self.models
@property
def UpperCAmelCase__ (self: Any ) -> int:
'''simple docstring'''
if not self.multi_process:
return False
elif self.is_tpu:
logger.info("Multiprocessing is currently not possible on TPU." )
return False
else:
return True
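# A small demo of the `list_field` helper defined at the top of this file: dataclasses
# reject mutable defaults, so the helper wraps them in a default_factory (demo names
# are illustrative, not part of the original file).
from dataclasses import dataclass

@dataclass
class _Demo:
    batch_sizes: list = list_field(default=[8, 32])

print(_Demo().batch_sizes)  # [8, 32]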
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int
def all_rotations( s : str ) -> list[str]:
    if not isinstance(s , str ):
        raise TypeError('The parameter s type must be str.' )
    return [s[i:] + s[:i] for i in range(len(s ) )]
def bwt_transform( s : str ) -> BWTTransformDict:
    if not isinstance(s , str ):
        raise TypeError('The parameter s type must be str.' )
    if not s:
        raise ValueError('The parameter s must not be empty.' )
    rotations = all_rotations(s )
    rotations.sort() # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations] ),
        "idx_original_string": rotations.index(s ),
    }
    return response
def reverse_bwt( bwt_string : str , idx_original_string : int ) -> str:
    if not isinstance(bwt_string , str ):
        raise TypeError('The parameter bwt_string type must be str.' )
    if not bwt_string:
        raise ValueError('The parameter bwt_string must not be empty.' )
    try:
        idx_original_string = int(idx_original_string )
    except ValueError:
        raise TypeError(
            'The parameter idx_original_string type must be int or passive'
            ' of cast to int.' )
    if idx_original_string < 0:
        raise ValueError('The parameter idx_original_string must not be lower than 0.' )
    if idx_original_string >= len(bwt_string ):
        raise ValueError(
            'The parameter idx_original_string must be lower than' ' len(bwt_string).' )
    ordered_rotations = [''] * len(bwt_string )
    for _ in range(len(bwt_string ) ):
        for i in range(len(bwt_string ) ):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
    entry_msg = 'Provide a string that I will generate its BWT transform: '
    s = input(entry_msg).strip()
    result = bwt_transform(s)
print(
f'''Burrows Wheeler transform for string \'{s}\' results '''
f'''in \'{result['bwt_string']}\''''
)
    original_string = reverse_bwt(result['bwt_string'], result['idx_original_string'])
print(
f'''Reversing Burrows Wheeler transform for entry \'{result['bwt_string']}\' '''
f'''we get original string \'{original_string}\''''
    )
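# A worked example of the round trip above, checked by hand: the sorted rotations of
# "banana" are abanan, anaban, ananab, banana, nabana, nanaba, so the last column is
# "nnbaaa" and the original string sits at sorted index 3.
assert bwt_transform("banana") == {"bwt_string": "nnbaaa", "idx_original_string": 3}
assert reverse_bwt("nnbaaa", 3) == "banana"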
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
'google/efficientnet-b7': 'https://huggingface.co/google/efficientnet-b7/resolve/main/config.json',
}
class EfficientNetConfig( PretrainedConfig ):
"""simple docstring"""
A_ = 'efficientnet'
def __init__( self , _lowerCamelCase = 3 , _lowerCamelCase = 6_0_0 , _lowerCamelCase = 2.0 , _lowerCamelCase = 3.1 , _lowerCamelCase = 8 , _lowerCamelCase = [3, 3, 5, 3, 5, 5, 3] , _lowerCamelCase = [3_2, 1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2] , _lowerCamelCase = [1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2, 3_2_0] , _lowerCamelCase = [] , _lowerCamelCase = [1, 2, 2, 2, 1, 2, 1] , _lowerCamelCase = [1, 2, 2, 3, 3, 4, 1] , _lowerCamelCase = [1, 6, 6, 6, 6, 6, 6] , _lowerCamelCase = 0.2_5 , _lowerCamelCase = "swish" , _lowerCamelCase = 2_5_6_0 , _lowerCamelCase = "mean" , _lowerCamelCase = 0.0_2 , _lowerCamelCase = 0.0_0_1 , _lowerCamelCase = 0.9_9 , _lowerCamelCase = 0.5 , _lowerCamelCase = 0.2 , **_lowerCamelCase , )-> Dict:
super().__init__(**_lowerCAmelCase )
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = width_coefficient
lowercase__ = depth_coefficient
lowercase__ = depth_divisor
lowercase__ = kernel_sizes
lowercase__ = in_channels
lowercase__ = out_channels
lowercase__ = depthwise_padding
lowercase__ = strides
lowercase__ = num_block_repeats
lowercase__ = expand_ratios
lowercase__ = squeeze_expansion_ratio
lowercase__ = hidden_act
lowercase__ = hidden_dim
lowercase__ = pooling_type
lowercase__ = initializer_range
lowercase__ = batch_norm_eps
lowercase__ = batch_norm_momentum
lowercase__ = dropout_rate
lowercase__ = drop_connect_rate
lowercase__ = sum(_lowerCAmelCase ) * 4
class EfficientNetOnnxConfig( OnnxConfig ):
"""simple docstring"""
A_ = version.parse('1.11' )
@property
def snake_case_( self )-> int:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def snake_case_( self )-> Any:
return 1e-5
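# A hedged usage sketch of this config via the real transformers class name: with the
# default num_block_repeats above ([1, 2, 2, 3, 3, 4, 1]), the derived layer count from
# the last assignment in __init__ is sum(...) * 4 = 64.
from transformers import EfficientNetConfig

cfg = EfficientNetConfig()
print(cfg.num_hidden_layers)  # 64
print(cfg.image_size)         # 600, the b7 default used here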
class Graph : # class, method, and variable names restored so the driver code below runs
    '''simple docstring'''
    def __init__( self ):
        self.vertex = {}
    def print_graph( self ):
        print(self.vertex )
        for i in self.vertex:
            print(i , ' -> ' , ' -> '.join([str(j ) for j in self.vertex[i]] ) )
    def add_edge( self , from_vertex : int , to_vertex : int ):
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex )
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]
    def dfs( self ):
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex )
        # call the recursive helper function
        for i in range(len(self.vertex ) ):
            if not visited[i]:
                self.dfs_recursive(i , visited )
    def dfs_recursive( self , start_vertex : int , visited : list ):
        # mark start vertex as visited
        visited[start_vertex] = True
        print(start_vertex , end=' ' )
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i , visited )
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
    # 0 1 2 3
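# For contrast, a hedged iterative version of the same traversal using an explicit
# stack instead of recursion (not part of the original file):
def dfs_iterative(graph, start):
    visited, stack, order = set(), [start], []
    while stack:
        v = stack.pop()
        if v not in visited:
            visited.add(v)
            order.append(v)
            stack.extend(reversed(graph.get(v, [])))
    return order

print(dfs_iterative({0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}, 0))  # [0, 1, 2, 3]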
"""simple docstring"""
def solution( pence : int = 200 ) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1 # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin , pence + 1 , 1 ):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(2_0_0) == 7_3_6_8_2
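# A small worked check of the recurrence (a hedged aside): only coins {1, 2, 5} fit
# into 5 pence, giving exactly four ways (1+1+1+1+1, 1+1+1+2, 1+2+2, 5).
assert solution(5) == 4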
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : str = logging.get_logger(__name__)
lowerCamelCase__ : Tuple = {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json',
'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json',
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json',
'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json',
}
class FunnelConfig( PretrainedConfig ):
'''simple docstring'''
lowercase_ = "funnel"
lowercase_ = {
"hidden_size": "d_model",
"num_attention_heads": "n_head",
}
def __init__( self : int , _lowerCAmelCase : Optional[int]=30_522 , _lowerCAmelCase : List[str]=[4, 4, 4] , _lowerCAmelCase : Tuple=None , _lowerCAmelCase : Optional[int]=2 , _lowerCAmelCase : int=768 , _lowerCAmelCase : Optional[Any]=12 , _lowerCAmelCase : Optional[Any]=64 , _lowerCAmelCase : Optional[Any]=3_072 , _lowerCAmelCase : List[str]="gelu_new" , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : int=0.1 , _lowerCAmelCase : Tuple=0.0 , _lowerCAmelCase : List[Any]=0.1 , _lowerCAmelCase : Dict=None , _lowerCAmelCase : str=1E-9 , _lowerCAmelCase : Any="mean" , _lowerCAmelCase : Union[str, Any]="relative_shift" , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Dict=True , _lowerCAmelCase : Tuple=True , **_lowerCAmelCase : Optional[Any] , ):
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = block_sizes
SCREAMING_SNAKE_CASE_ = [1] * len(_lowerCAmelCase ) if block_repeats is None else block_repeats
assert len(_lowerCAmelCase ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
SCREAMING_SNAKE_CASE_ = num_decoder_layers
SCREAMING_SNAKE_CASE_ = d_model
SCREAMING_SNAKE_CASE_ = n_head
SCREAMING_SNAKE_CASE_ = d_head
SCREAMING_SNAKE_CASE_ = d_inner
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout
SCREAMING_SNAKE_CASE_ = attention_dropout
SCREAMING_SNAKE_CASE_ = activation_dropout
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = initializer_std
SCREAMING_SNAKE_CASE_ = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], F"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
SCREAMING_SNAKE_CASE_ = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], F"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
SCREAMING_SNAKE_CASE_ = attention_type
SCREAMING_SNAKE_CASE_ = separate_cls
SCREAMING_SNAKE_CASE_ = truncate_seq
SCREAMING_SNAKE_CASE_ = pool_q_only
super().__init__(**_lowerCAmelCase )
@property
def lowerCAmelCase_ ( self : Optional[int] ):
return sum(self.block_sizes )
@num_hidden_layers.setter
def lowerCAmelCase_ ( self : int , _lowerCAmelCase : List[Any] ):
raise NotImplementedError(
'This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.' )
@property
def lowerCAmelCase_ ( self : List[Any] ):
return len(self.block_sizes )
@num_blocks.setter
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : Union[str, Any] ):
        raise NotImplementedError('This model does not support the setting of `num_blocks`. Please set `block_sizes`.' )
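# A hedged usage sketch of the derived properties above, using the real transformers
# class name (FunnelConfig) for the class defined here:
from transformers import FunnelConfig

cfg = FunnelConfig(block_sizes=[4, 4, 4])
print(cfg.num_hidden_layers)  # 12, i.e. sum(block_sizes)
print(cfg.num_blocks)         # 3, i.e. len(block_sizes)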
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)
_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ['names', 'prefix']
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols']
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ['encoding_errors', 'on_bad_lines']
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ['date_format']
@dataclass
class CsvConfig( datasets.BuilderConfig ):
'''simple docstring'''
lowercase__ : Optional[int] = ','
lowercase__ : Union[str, Any] = None
lowercase__ : List[Any] = 'infer'
lowercase__ : Any = None
lowercase__ : Optional[int] = None
lowercase__ : Optional[int] = None
lowercase__ : Union[str, Any] = None
lowercase__ : Optional[Any] = None
lowercase__ : List[Any] = True
lowercase__ : str = None
lowercase__ : Dict = None
lowercase__ : str = None
lowercase__ : Dict = None
lowercase__ : Optional[int] = False
lowercase__ : int = None
lowercase__ : List[str] = None
lowercase__ : Union[str, Any] = None
lowercase__ : Optional[int] = True
lowercase__ : Union[str, Any] = True
lowercase__ : List[Any] = False
lowercase__ : str = True
lowercase__ : List[Any] = None
lowercase__ : Dict = '.'
lowercase__ : int = None
lowercase__ : Tuple = '"'
lowercase__ : Union[str, Any] = 0
lowercase__ : List[Any] = None
lowercase__ : Any = None
lowercase__ : Optional[int] = None
lowercase__ : List[Any] = None
lowercase__ : str = True
lowercase__ : Dict = True
lowercase__ : int = 0
lowercase__ : int = True
lowercase__ : List[Any] = False
lowercase__ : Tuple = None
lowercase__ : str = 10_000
lowercase__ : str = None
lowercase__ : int = 'strict'
lowercase__ : Optional[int] = 'error'
lowercase__ : Tuple = None
def snake_case__ ( self ):
if self.delimiter is not None:
_lowerCamelCase = self.delimiter
if self.column_names is not None:
_lowerCamelCase = self.column_names
@property
def snake_case__ ( self ):
        pd_read_csv_kwargs = {
'''sep''': self.sep,
'''header''': self.header,
'''names''': self.names,
'''index_col''': self.index_col,
'''usecols''': self.usecols,
'''prefix''': self.prefix,
'''mangle_dupe_cols''': self.mangle_dupe_cols,
'''engine''': self.engine,
'''converters''': self.converters,
'''true_values''': self.true_values,
'''false_values''': self.false_values,
'''skipinitialspace''': self.skipinitialspace,
'''skiprows''': self.skiprows,
'''nrows''': self.nrows,
'''na_values''': self.na_values,
'''keep_default_na''': self.keep_default_na,
'''na_filter''': self.na_filter,
'''verbose''': self.verbose,
'''skip_blank_lines''': self.skip_blank_lines,
'''thousands''': self.thousands,
'''decimal''': self.decimal,
'''lineterminator''': self.lineterminator,
'''quotechar''': self.quotechar,
'''quoting''': self.quoting,
'''escapechar''': self.escapechar,
'''comment''': self.comment,
'''encoding''': self.encoding,
'''dialect''': self.dialect,
'''error_bad_lines''': self.error_bad_lines,
'''warn_bad_lines''': self.warn_bad_lines,
'''skipfooter''': self.skipfooter,
'''doublequote''': self.doublequote,
'''memory_map''': self.memory_map,
'''float_precision''': self.float_precision,
'''chunksize''': self.chunksize,
'''encoding_errors''': self.encoding_errors,
'''on_bad_lines''': self.on_bad_lines,
'''date_format''': self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , pd_read_csv_parameter ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv( datasets.ArrowBasedBuilder ):
'''simple docstring'''
lowercase__ : Any = CsvConfig
def snake_case__ ( self ):
return datasets.DatasetInfo(features=self.config.features )
def snake_case__ ( self , lowerCamelCase__ ):
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
_lowerCamelCase = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_lowerCAmelCase , (str, list, tuple) ):
_lowerCamelCase = data_files
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCamelCase = [files]
_lowerCamelCase = [dl_manager.iter_files(_lowerCAmelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
_lowerCamelCase = []
for split_name, files in data_files.items():
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCamelCase = [files]
_lowerCamelCase = [dl_manager.iter_files(_lowerCAmelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=_lowerCAmelCase , gen_kwargs={'''files''': files} ) )
return splits
def snake_case__ ( self , lowerCamelCase__ ):
if self.config.features is not None:
_lowerCamelCase = self.config.features.arrow_schema
if all(not require_storage_cast(_lowerCAmelCase ) for feature in self.config.features.values() ):
# cheaper cast
_lowerCamelCase = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=_lowerCAmelCase )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
_lowerCamelCase = table_cast(_lowerCAmelCase , _lowerCAmelCase )
return pa_table
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
_lowerCamelCase = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(_lowerCAmelCase ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(_lowerCAmelCase ) ):
_lowerCamelCase = pd.read_csv(_lowerCAmelCase , iterator=_lowerCAmelCase , dtype=_lowerCAmelCase , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(_lowerCAmelCase ):
_lowerCamelCase = pa.Table.from_pandas(_lowerCAmelCase )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(_lowerCAmelCase )
except ValueError as e:
logger.error(F"""Failed to read file '{file}' with error {type(_lowerCAmelCase )}: {e}""" )
raise
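# From the user side, this builder is what runs under the hood of the one-liner below
# (the file path is illustrative); CsvConfig fields such as `sep` are forwarded to
# pandas.read_csv via pd_read_csv_kwargs.
from datasets import load_dataset

ds = load_dataset("csv", data_files={"train": "train.csv"}, sep=",")
print(ds["train"].features)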
from __future__ import annotations
from collections.abc import Iterator
class Node : # class names restored; the annotations below already reference Node
    '''simple docstring'''
    def __init__( self , value : int ):
        self.value = value
        self.left = None
        self.right = None
class BinaryTreeNodeSum :
    '''simple docstring'''
    def __init__( self , tree : Node ):
        self.tree = tree
    def depth_first_search( self , node : Node | None ) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left ) + self.depth_first_search(node.right )
        )
    def __iter__( self ) -> Iterator[int]:
        yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
    doctest.testmod()
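# Quick usage sketch for the restored classes above: a three-node tree whose
# values sum to 6.
root = Node(1)
root.left, root.right = Node(2), Node(3)
assert next(iter(BinaryTreeNodeSum(root))) == 6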
"""simple docstring"""
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
lowercase_ = VQModel
lowercase_ = "sample"
@property
def a_ ( self : Dict , UpperCamelCase_ : str=(32, 32)):
"""simple docstring"""
__UpperCAmelCase : Any = 4
__UpperCAmelCase : int = 3
__UpperCAmelCase : int = floats_tensor((batch_size, num_channels) + sizes).to(_lowerCAmelCase)
return {"sample": image}
@property
def a_ ( self : str):
"""simple docstring"""
return (3, 32, 32)
@property
def a_ ( self : List[str]):
"""simple docstring"""
return (3, 32, 32)
def a_ ( self : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : List[Any] = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 3,
}
__UpperCAmelCase : List[str] = self.dummy_input
return init_dict, inputs_dict
def a_ ( self : Tuple):
"""simple docstring"""
pass
def a_ ( self : List[str]):
"""simple docstring"""
pass
def a_ ( self : List[Any]):
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase : List[str] = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=_lowerCAmelCase)
self.assertIsNotNone(_lowerCAmelCase)
self.assertEqual(len(loading_info["missing_keys"]) , 0)
model.to(_lowerCAmelCase)
__UpperCAmelCase : Optional[int] = model(**self.dummy_input)
assert image is not None, "Make sure output is not None"
def a_ ( self : List[Any]):
"""simple docstring"""
__UpperCAmelCase : List[Any] = VQModel.from_pretrained("fusing/vqgan-dummy")
model.to(_lowerCAmelCase).eval()
torch.manual_seed(0)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0)
__UpperCAmelCase : List[str] = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size)
__UpperCAmelCase : str = image.to(_lowerCAmelCase)
with torch.no_grad():
__UpperCAmelCase : Dict = model(_lowerCAmelCase).sample
__UpperCAmelCase : Any = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
__UpperCAmelCase : List[Any] = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
# fmt: on
self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3))
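# A hedged sketch mirroring prepare_init_args_and_inputs_for_common above
# (diffusers' VQModel; the config is tiny, so this runs quickly on CPU):
import torch
from diffusers import VQModel

model = VQModel(
    block_out_channels=[32, 64],
    in_channels=3,
    out_channels=3,
    down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
    up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
    latent_channels=3,
)
out = model(torch.randn(1, 3, 32, 32)).sample
print(out.shape)  # torch.Size([1, 3, 32, 32])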
def search( list_data : list , key : int , left : int = 0 , right : int = 0 ) -> int:
    right = right or len(list_data ) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data , key , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
    doctest.testmod()
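# Worked examples for the restored search above: it scans from both ends of the
# list, moving one step inward per recursive call.
assert search([1, 2, 3, 4], 3) == 2   # found on the right-hand sweep
assert search([1, 2, 3, 4], 9) == -1  # absent values return -1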
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
class LayoutLMvaFeatureExtractor( LayoutLMvaImageProcessor ):
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
"""simple docstring"""
warnings.warn(
'''The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use LayoutLMv2ImageProcessor instead.''' , FutureWarning , )
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
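# The same deprecation-shim pattern in miniature (illustrative class names only):
# the old name keeps working but warns once, then defers entirely to the new class.
import warnings

class NewProcessor:
    def __init__(self, *args, **kwargs):
        pass

class OldFeatureExtractor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

OldFeatureExtractor()  # emits a FutureWarning, then behaves like NewProcessor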
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model',
        'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model',
    },
    'tokenizer_file': {
        'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json',
        'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/fnet-base': 512,
    'google/fnet-large': 512,
}
SPIECE_UNDERLINE = '▁'
class FNetTokenizerFast( PreTrainedTokenizerFast ):
'''simple docstring'''
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = ["input_ids", "token_type_ids"]
lowercase_ = FNetTokenizer
def __init__( self : List[Any] , _lowerCAmelCase : Dict=None , _lowerCAmelCase : Dict=None , _lowerCAmelCase : List[str]=False , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : List[Any]="<unk>" , _lowerCAmelCase : Optional[Any]="[SEP]" , _lowerCAmelCase : Optional[Any]="<pad>" , _lowerCAmelCase : Optional[int]="[CLS]" , _lowerCAmelCase : Optional[Any]="[MASK]" , **_lowerCAmelCase : Any , ):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
SCREAMING_SNAKE_CASE_ = (
AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase , normalized=_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase )
else mask_token
)
super().__init__(
_lowerCAmelCase , tokenizer_file=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , remove_space=_lowerCAmelCase , keep_accents=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , **_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = do_lower_case
SCREAMING_SNAKE_CASE_ = remove_space
SCREAMING_SNAKE_CASE_ = keep_accents
SCREAMING_SNAKE_CASE_ = vocab_file
SCREAMING_SNAKE_CASE_ = False if not self.vocab_file else True
def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCAmelCase_ ( self : Optional[int] , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ):
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE_ = os.path.join(
_lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ):
copyfile(self.vocab_file , _lowerCAmelCase )
        return (out_vocab_file,)
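# A hedged usage sketch with the real transformers class name (FNetTokenizerFast);
# this downloads the hosted tokenizer files listed in the maps above.
from transformers import FNetTokenizerFast

tok = FNetTokenizerFast.from_pretrained("google/fnet-base")
enc = tok("First segment.", "Second segment.")
print(enc["input_ids"][:3])        # starts with the [CLS] id
print(set(enc["token_type_ids"]))  # {0, 1}: one id per segment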
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/git-base': 'https://huggingface.co/microsoft/git-base/resolve/main/config.json',
}
class GitVisionConfig( PretrainedConfig ):
lowerCamelCase : Optional[Any] = """git_vision_model"""
def __init__( self , lowerCamelCase__=768 , lowerCamelCase__=3_072 , lowerCamelCase__=12 , lowerCamelCase__=12 , lowerCamelCase__=3 , lowerCamelCase__=224 , lowerCamelCase__=16 , lowerCamelCase__="quick_gelu" , lowerCamelCase__=1e-5 , lowerCamelCase__=0.0 , lowerCamelCase__=0.02 , **lowerCamelCase__ , ) -> List[Any]:
'''simple docstring'''
super().__init__(**_lowerCAmelCase )
lowercase__ = hidden_size
lowercase__ = intermediate_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = num_channels
lowercase__ = patch_size
lowercase__ = image_size
lowercase__ = initializer_range
lowercase__ = attention_dropout
lowercase__ = layer_norm_eps
lowercase__ = hidden_act
@classmethod
def A__ ( cls , lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
cls._set_token_in_kwargs(_lowerCAmelCase )
lowercase__ , lowercase__ = cls.get_config_dict(_lowerCAmelCase , **_lowerCAmelCase )
# get the vision config dict if we are loading from GITConfig
if config_dict.get("""model_type""" ) == "git":
lowercase__ = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_lowerCAmelCase , **_lowerCAmelCase )
class GitConfig( PretrainedConfig ):
lowerCamelCase : int = """git"""
def __init__( self , lowerCamelCase__=None , lowerCamelCase__=30_522 , lowerCamelCase__=768 , lowerCamelCase__=6 , lowerCamelCase__=12 , lowerCamelCase__=3_072 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=1_024 , lowerCamelCase__=0.02 , lowerCamelCase__=1e-12 , lowerCamelCase__=0 , lowerCamelCase__="absolute" , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=101 , lowerCamelCase__=102 , lowerCamelCase__=None , **lowerCamelCase__ , ) -> Optional[int]:
'''simple docstring'''
super().__init__(bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , pad_token_id=_lowerCAmelCase , **_lowerCAmelCase )
if vision_config is None:
lowercase__ = {}
logger.info("""vision_config is None. initializing the GitVisionConfig with default values.""" )
lowercase__ = GitVisionConfig(**_lowerCAmelCase )
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = hidden_act
lowercase__ = intermediate_size
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = position_embedding_type
lowercase__ = use_cache
lowercase__ = tie_word_embeddings
lowercase__ = num_image_with_embedding
lowercase__ = bos_token_id
lowercase__ = eos_token_id
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase__ = copy.deepcopy(self.__dict__ )
lowercase__ = self.vision_config.to_dict()
lowercase__ = self.__class__.model_type
return output
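# A hedged sketch of composing the two configs above via the real transformers
# names (a GitVisionConfig nests inside a GitConfig):
from transformers import GitConfig, GitVisionConfig

vision = GitVisionConfig(image_size=224, patch_size=16)
cfg = GitConfig(vision_config=vision.to_dict(), num_hidden_layers=6)
print(cfg.vision_config.image_size)  # 224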
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    factor_map = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime , None )
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
        yield prime
        prime += 1
def solution( limit : float = 1E10 ) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes )
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the reminder will be 2.
        next(primes )
        n += 2
if __name__ == "__main__":
    print(solution())
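# Quick sanity check of the restored incremental sieve (a hedged aside, not in
# the source): it yields the primes in order, starting from 2.
gen = sieve()
assert [next(gen) for _ in range(5)] == [2, 3, 5, 7, 11]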
class Matrix: # Public class to implement a graph; class and method names restored
    """simple docstring"""
    def __init__( self , row : int , col : int , graph : list ):
        self.ROW = row
        self.COL = col
        self.graph = graph
    def is_safe( self , i : int , j : int , visited : list ) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )
    def diffs( self , i : int , j : int , visited : list ) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True # Make those cells visited
        for k in range(8 ):
            if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , visited ):
                self.diffs(i + row_nbr[k] , j + col_nbr[k] , visited )
    def count_islands( self ) -> int: # And finally, count all islands.
        visited = [[False for j in range(self.COL )] for i in range(self.ROW )]
        count = 0
        for i in range(self.ROW ):
            for j in range(self.COL ):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i , j , visited )
                    count += 1
        return count
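# Quick check of the restored island counter on a small 8-connected grid: the
# diagonal adjacency merges (2,0) with the top-left cluster, giving five islands.
grid = [
    [1, 1, 0, 0, 0],
    [0, 1, 0, 0, 1],
    [1, 0, 0, 1, 1],
    [0, 0, 0, 0, 0],
    [1, 0, 1, 0, 1],
]
assert Matrix(5, 5, grid).count_islands() == 5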
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset :
'''simple docstring'''
def __init__( self : Any , _lowerCAmelCase : Optional[int]=2 , _lowerCAmelCase : Any=3 , _lowerCAmelCase : Tuple=64 , _lowerCAmelCase : List[str]=None ):
SCREAMING_SNAKE_CASE_ = np.random.default_rng(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = length
SCREAMING_SNAKE_CASE_ = rng.normal(size=(length,) ).astype(np.floataa )
SCREAMING_SNAKE_CASE_ = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self : Optional[int] ):
return self.length
def __getitem__( self : str , _lowerCAmelCase : Union[str, Any] ):
return {"x": self.x[i], "y": self.y[i]}
class RegressionModel4XPU( torch.nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , _lowerCAmelCase : Dict=0 , _lowerCAmelCase : List[str]=0 , _lowerCAmelCase : str=False ):
super().__init__()
SCREAMING_SNAKE_CASE_ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE_ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE_ = True
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : Union[str, Any]=None ):
if self.first_batch:
print(F"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}" )
SCREAMING_SNAKE_CASE_ = False
return x * self.a[0] + self.b[0]
class RegressionModel( torch.nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , _lowerCAmelCase : Any=0 , _lowerCAmelCase : Any=0 , _lowerCAmelCase : Optional[Any]=False ):
super().__init__()
SCREAMING_SNAKE_CASE_ = torch.nn.Parameter(torch.tensor(_lowerCAmelCase ).float() )
SCREAMING_SNAKE_CASE_ = torch.nn.Parameter(torch.tensor(_lowerCAmelCase ).float() )
SCREAMING_SNAKE_CASE_ = True
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : Optional[int]=None ):
if self.first_batch:
print(F"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}" )
SCREAMING_SNAKE_CASE_ = False
return x * self.a + self.b
def UpperCAmelCase_ ( accelerator , batch_size : int = 16 ):  # parameter names restored; the body references `accelerator`
from datasets import load_dataset
from transformers import AutoTokenizer
SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained('bert-base-cased' )
SCREAMING_SNAKE_CASE_ = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'}
SCREAMING_SNAKE_CASE_ = load_dataset('csv' , data_files=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = datasets['train'].unique('label' )
SCREAMING_SNAKE_CASE_ = {v: i for i, v in enumerate(__UpperCAmelCase )}
def tokenize_function(__UpperCAmelCase : Optional[int] ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE_ = tokenizer(
examples['sentence1'] , examples['sentence2'] , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , padding='max_length' )
if "label" in examples:
SCREAMING_SNAKE_CASE_ = [label_to_id[l] for l in examples['label']]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
SCREAMING_SNAKE_CASE_ = datasets.map(
__UpperCAmelCase , batched=__UpperCAmelCase , remove_columns=['sentence1', 'sentence2', 'label'] , )
def collate_fn(__UpperCAmelCase : Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__UpperCAmelCase , padding='max_length' , max_length=1_28 , return_tensors='pt' )
return tokenizer.pad(__UpperCAmelCase , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE_ = DataLoader(tokenized_datasets['train'] , shuffle=__UpperCAmelCase , collate_fn=__UpperCAmelCase , batch_size=2 )
SCREAMING_SNAKE_CASE_ = DataLoader(tokenized_datasets['validation'] , shuffle=__UpperCAmelCase , collate_fn=__UpperCAmelCase , batch_size=1 )
return train_dataloader, eval_dataloader | 31 | 0 |
""" Testing suite for the PyTorch MobileViT model. """


import inspect
import unittest

from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
    from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor


class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))


class MobileViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 270 |
import warnings

from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs) | 31 | 0 |
""" YOLOS model configuration"""

from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a YOLOS model.
    """

    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
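
# Small offline sketch (added for illustration; not part of the original module):
# constructing the config and its ONNX export description needs no weights.
if __name__ == "__main__":
    config = YolosConfig()
    onnx_config = YolosOnnxConfig(config)
    print(config.model_type, dict(onnx_config.inputs))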
| 591 |
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Find the index of the first negative number via binary search."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count negatives, narrowing the search bound row by row."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count negatives by checking every number."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Count negatives, breaking out of each row at its first negative number."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
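
# A small consistency check (added for illustration): all three counting
# strategies must agree on every grid in `test_grids` defined above.
def _check_strategies_agree() -> None:
    for example_grid in test_grids:
        expected = count_negatives_brute_force(example_grid)
        assert count_negatives_binary_search(example_grid) == expected
        assert count_negatives_brute_force_with_break(example_grid) == expected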
def benchmark() -> None:
    """Benchmark the three strategies against each other."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark() | 31 | 0 |
def rank_of_matrix(matrix: list[list[int | float]]) -> int:
    """Find the rank of a matrix via Gaussian elimination (modifies matrix in place)."""
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)

    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]

            # Reduce the row pointer by one to stay on the same row
            row -= 1

    return rank


if __name__ == "__main__":
    import doctest

    doctest.testmod()
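    # Added illustration: the middle row is twice the first, so only two rows
    # are independent and the rank is 2.
    example = [[1.0, 2.0, 3.0], [2.0, 4.0, 6.0], [0.0, 1.0, 1.0]]
    print(f"rank = {rank_of_matrix(example)}")  # rank = 2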
| 417 |
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_mmbt": ["MMBTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]


if TYPE_CHECKING:
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 31 | 0 |
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}


class ErnieMTokenizer(PreTrainedTokenizer):
    """Constructs an Ernie-M tokenizer, based on SentencePiece."""

    model_input_names: List[str] = ["input_ids"]

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES

    def __init__(
        self,
        sentencepiece_model_ckpt,
        vocab_file=None,
        do_lower_case=False,
        encoding="utf8",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            vocab_file=vocab_file,
            encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)

        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}

    def get_offset_mapping(self, text):
        if text is None:
            return None

        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []

        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))

        text, token_mapping, offset = normalized_text, [], 0

        if self.do_lower_case:
            text = text.lower()

        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)

            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)

    def clean_text(self, text):
        return "".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))

    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")

        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def convert_ids_to_string(self, ids):
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]

        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]

        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)

    def is_ch_char(self, char):
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False

    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)
        return token_to_idx

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
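
# Hedged usage sketch (added for illustration; needs real checkpoint files,
# hence commented out):
#
#     tokenizer = ErnieMTokenizer(
#         sentencepiece_model_ckpt="sentencepiece.bpe.model", vocab_file="vocab.txt"
#     )
#     print(tokenizer.tokenize("Ernie-M splits text with SentencePiece"))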
| 351 |
import unittest
from typing import Tuple

import torch

from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch


@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)

        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32

        if self.block_type == "mid":
            init_dict.pop("out_channels")

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward() | 31 | 0 |
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Recursively search for key from both ends of list_data.

    Returns the index of key, or -1 if it is not present.
    """
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
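    # Added illustration: the search narrows from both ends, one step per call.
    print(search([1, 3, 5, 7, 9], 7))  # prints 3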
| 161 |
import operator as op


def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x in digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            stack.append(
                str(opr[x](int(a), int(b)))
            )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8),
                ("push(" + a + x + b + ")").ljust(12),
                ",".join(stack),
                sep=" | ",
            )
    return int(stack[0])
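
# Sample session (added for illustration): entering "5 6 9 * +" pushes 5, 6
# and 9, pops 9 and 6 to push 6*9 = 54, then pops 54 and 5 to push 5+54, and
# finally prints "Result = 59".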
if __name__ == "__main__":
    Postfix = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ')
    print('\n\tResult = ', solve(Postfix)) | 31 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
A_ : Union[str, Any] =logging.get_logger(__name__) # pylint: disable=invalid-name
A_ : Optional[Any] ='\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class __a ( _SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ : Any = 42
class __a ( _SCREAMING_SNAKE_CASE ):
def __init__( self , a__ , a__ , a__ , a__ , a__ , ):
super().__init__()
self.register_modules(
prior=_lowerCAmelCase , image_encoder=_lowerCAmelCase , image_processor=_lowerCAmelCase , scheduler=_lowerCAmelCase , renderer=_lowerCAmelCase , )
def snake_case_ ( self , a__ , a__ , a__ , a__ , a__ , a__ ):
if latents is None:
_lowerCamelCase = randn_tensor(_lowerCAmelCase , generator=_lowerCAmelCase , device=_lowerCAmelCase , dtype=_lowerCAmelCase )
else:
if latents.shape != shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
_lowerCamelCase = latents.to(_lowerCAmelCase )
_lowerCamelCase = latents * scheduler.init_noise_sigma
return latents
def snake_case_ ( self , a__=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
_lowerCamelCase = torch.device(F'cuda:{gpu_id}' )
_lowerCamelCase = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_lowerCAmelCase , _lowerCAmelCase )
@property
def snake_case_ ( self ):
if self.device != torch.device('meta' ) or not hasattr(self.image_encoder , '_hf_hook' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(_lowerCAmelCase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def snake_case_ ( self , a__ , a__ , a__ , a__ , ):
if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and isinstance(image[0] , torch.Tensor ):
_lowerCamelCase = torch.cat(_lowerCAmelCase , axis=0 ) if image[0].ndim == 4 else torch.stack(_lowerCAmelCase , axis=0 )
if not isinstance(_lowerCAmelCase , torch.Tensor ):
_lowerCamelCase = self.image_processor(_lowerCAmelCase , return_tensors='pt' ).pixel_values[0].unsqueeze(0 )
_lowerCamelCase = image.to(dtype=self.image_encoder.dtype , device=_lowerCAmelCase )
_lowerCamelCase = self.image_encoder(_lowerCAmelCase )['last_hidden_state']
_lowerCamelCase = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
_lowerCamelCase = image_embeds.repeat_interleave(_lowerCAmelCase , dim=0 )
if do_classifier_free_guidance:
_lowerCamelCase = torch.zeros_like(_lowerCAmelCase )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCamelCase = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(_lowerCAmelCase )
def __call__( self , a__ , a__ = 1 , a__ = 25 , a__ = None , a__ = None , a__ = 4.0 , a__ = 64 , a__ = "pil" , a__ = True , ):
if isinstance(_lowerCAmelCase , PIL.Image.Image ):
_lowerCamelCase = 1
elif isinstance(_lowerCAmelCase , torch.Tensor ):
_lowerCamelCase = image.shape[0]
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
_lowerCamelCase = len(_lowerCAmelCase )
else:
raise ValueError(
F'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_lowerCAmelCase )}' )
_lowerCamelCase = self._execution_device
_lowerCamelCase = batch_size * num_images_per_prompt
_lowerCamelCase = guidance_scale > 1.0
_lowerCamelCase = self._encode_image(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# prior
self.scheduler.set_timesteps(_lowerCAmelCase , device=_lowerCAmelCase )
_lowerCamelCase = self.scheduler.timesteps
_lowerCamelCase = self.prior.config.num_embeddings
_lowerCamelCase = self.prior.config.embedding_dim
_lowerCamelCase = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
_lowerCamelCase = latents.reshape(latents.shape[0] , _lowerCAmelCase , _lowerCAmelCase )
for i, t in enumerate(self.progress_bar(_lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
_lowerCamelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCamelCase = self.scheduler.scale_model_input(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase = self.prior(
_lowerCAmelCase , timestep=_lowerCAmelCase , proj_embedding=_lowerCAmelCase , ).predicted_image_embedding
# remove the variance
_lowerCamelCase , _lowerCamelCase = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance is not None:
_lowerCamelCase , _lowerCamelCase = noise_pred.chunk(2 )
_lowerCamelCase = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
_lowerCamelCase = self.scheduler.step(
_lowerCAmelCase , timestep=_lowerCAmelCase , sample=_lowerCAmelCase , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=_lowerCAmelCase )
_lowerCamelCase = []
for i, latent in enumerate(_lowerCAmelCase ):
print()
_lowerCamelCase = self.renderer.decode(
latent[None, :] , _lowerCAmelCase , size=_lowerCAmelCase , ray_batch_size=40_96 , n_coarse_samples=64 , n_fine_samples=1_28 , )
images.append(_lowerCAmelCase )
_lowerCamelCase = torch.stack(_lowerCAmelCase )
if output_type not in ["np", "pil"]:
raise ValueError(F'Only the output types `pil` and `np` are supported not output_type={output_type}' )
_lowerCamelCase = images.cpu().numpy()
if output_type == "pil":
_lowerCamelCase = [self.numpy_to_pil(_lowerCAmelCase ) for image in images]
# Offload last model to CPU
if hasattr(self , 'final_offload_hook' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=_lowerCAmelCase )
| 650 |
def sylvester(number: int) -> int:
    """Return the n-th number in Sylvester's sequence (1-indexed)."""
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
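
# Added illustration: every term is the product of all previous terms plus one,
# so the sequence begins 2, 3, 7, 43, 1807, ...
assert [sylvester(i) for i in range(1, 6)] == [2, 3, 7, 43, 1807]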
if __name__ == "__main__":
print(f'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''') | 31 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = jnp.ones((batch_size, length) ) / length
return scores
def snake_case__ ( self ):
_lowerCamelCase = None
_lowerCamelCase = 2_0
_lowerCamelCase = self._get_uniform_logits(batch_size=2 , length=_lowerCAmelCase )
# tweak scores to not be uniform anymore
_lowerCamelCase = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
_lowerCamelCase = scores.at[1, 1_0].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
_lowerCamelCase = jax.nn.softmax(_lowerCAmelCase , axis=-1 )
_lowerCamelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
_lowerCamelCase = FlaxTemperatureLogitsWarper(temperature=1.3 )
_lowerCamelCase = jax.nn.softmax(temp_dist_warper_sharper(_lowerCAmelCase , scores.copy() , cur_len=_lowerCAmelCase ) , axis=-1 )
_lowerCamelCase = jax.nn.softmax(temp_dist_warper_smoother(_lowerCAmelCase , scores.copy() , cur_len=_lowerCAmelCase ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def snake_case__ ( self ):
_lowerCamelCase = None
_lowerCamelCase = 1_0
_lowerCamelCase = 2
# create ramp distribution
_lowerCamelCase = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] , (batch_size, vocab_size) ).copy()
_lowerCamelCase = ramp_logits[1:, : vocab_size // 2] + vocab_size
_lowerCamelCase = FlaxTopKLogitsWarper(3 )
_lowerCamelCase = top_k_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
_lowerCamelCase = 5
_lowerCamelCase = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
_lowerCamelCase = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] , (batch_size, length) ).copy()
_lowerCamelCase = top_k_warp_safety_check(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def snake_case__ ( self ):
_lowerCamelCase = None
_lowerCamelCase = 1_0
_lowerCamelCase = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
_lowerCamelCase = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.1_5, 0.3, 0.3, 0.2_5]] ) )
_lowerCamelCase = FlaxTopPLogitsWarper(0.8 )
_lowerCamelCase = np.exp(top_p_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
_lowerCamelCase = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.2_5]] )
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 ) )
# check edge cases with negative and extreme logits
_lowerCamelCase = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
_lowerCamelCase = ramp_logits[1] * 1_0_0.0
# make sure at least 2 tokens are kept
_lowerCamelCase = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
_lowerCamelCase = top_p_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def snake_case__ ( self ):
_lowerCamelCase = 2_0
_lowerCamelCase = 4
_lowerCamelCase = 0
_lowerCamelCase = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=_lowerCAmelCase )
# check that min length is applied at length 5
_lowerCamelCase = ids_tensor((batch_size, 2_0) , vocab_size=2_0 )
_lowerCamelCase = 5
_lowerCamelCase = self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase = min_dist_processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
_lowerCamelCase = self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase = 1_5
_lowerCamelCase = min_dist_processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def snake_case__ ( self ):
_lowerCamelCase = 2_0
_lowerCamelCase = 4
_lowerCamelCase = 0
_lowerCamelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
# check that all scores are -inf except the bos_token_id score
_lowerCamelCase = ids_tensor((batch_size, 1) , vocab_size=2_0 )
_lowerCamelCase = 1
_lowerCamelCase = self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase = logits_processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
_lowerCamelCase = 3
_lowerCamelCase = self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase = logits_processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def snake_case__ ( self ):
_lowerCamelCase = 2_0
_lowerCamelCase = 4
_lowerCamelCase = 0
_lowerCamelCase = 5
_lowerCamelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase , eos_token_id=_lowerCAmelCase )
# check that all scores are -inf except the eos_token_id when max_length is reached
_lowerCamelCase = ids_tensor((batch_size, 4) , vocab_size=2_0 )
_lowerCamelCase = 4
_lowerCamelCase = self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase = logits_processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
_lowerCamelCase = 3
_lowerCamelCase = self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase = logits_processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def snake_case__ ( self ):
_lowerCamelCase = 4
_lowerCamelCase = 1_0
_lowerCamelCase = 1_5
_lowerCamelCase = 2
_lowerCamelCase = 1
_lowerCamelCase = 1_5
# dummy input_ids and scores
_lowerCamelCase = ids_tensor((batch_size, sequence_length) , _lowerCAmelCase )
_lowerCamelCase = input_ids.copy()
_lowerCamelCase = self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase = scores.copy()
# instantiate all dist processors
_lowerCamelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
_lowerCamelCase = FlaxTopKLogitsWarper(3 )
_lowerCamelCase = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_lowerCamelCase = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=_lowerCAmelCase )
_lowerCamelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
_lowerCamelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase , eos_token_id=_lowerCAmelCase )
_lowerCamelCase = 1_0
# no processor list
_lowerCamelCase = temp_dist_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
_lowerCamelCase = top_k_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
_lowerCamelCase = top_p_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
_lowerCamelCase = min_dist_proc(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
_lowerCamelCase = bos_dist_proc(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
_lowerCamelCase = eos_dist_proc(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
# with processor list
_lowerCamelCase = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_lowerCamelCase = processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def snake_case__ ( self ):
_lowerCamelCase = 4
_lowerCamelCase = 1_0
_lowerCamelCase = 1_5
_lowerCamelCase = 2
_lowerCamelCase = 1
_lowerCamelCase = 1_5
# dummy input_ids and scores
_lowerCamelCase = ids_tensor((batch_size, sequence_length) , _lowerCAmelCase )
_lowerCamelCase = input_ids.copy()
_lowerCamelCase = self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase = scores.copy()
# instantiate all dist processors
_lowerCamelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
_lowerCamelCase = FlaxTopKLogitsWarper(3 )
_lowerCamelCase = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_lowerCamelCase = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=_lowerCAmelCase )
_lowerCamelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
_lowerCamelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase , eos_token_id=_lowerCAmelCase )
_lowerCamelCase = 1_0
# no processor list
def run_no_processor_list(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = temp_dist_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
_lowerCamelCase = top_k_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
_lowerCamelCase = top_p_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
_lowerCamelCase = min_dist_proc(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
_lowerCamelCase = bos_dist_proc(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
_lowerCamelCase = eos_dist_proc(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
return scores
# with processor list
def run_processor_list(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_lowerCamelCase = processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
return scores
_lowerCamelCase = jax.jit(_lowerCAmelCase )
_lowerCamelCase = jax.jit(_lowerCAmelCase )
_lowerCamelCase = jitted_run_no_processor_list(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase = jitted_run_processor_list(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
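# --- Hedged usage sketch (not part of the tests above) -----------------------
# The tests compose warpers/processors through FlaxLogitsProcessorList; the
# same pattern works in user code. Assumes the public transformers Flax
# generation API (the same classes the tests exercise).
import jax.numpy as jnp
from transformers.generation import (
    FlaxLogitsProcessorList,
    FlaxTemperatureLogitsWarper,
    FlaxTopKLogitsWarper,
)

input_ids = jnp.ones((4, 10), dtype="i4")  # dummy prompt of length 10
scores = jnp.zeros((4, 15))                # uniform dummy logits, vocab_size=15
processors = FlaxLogitsProcessorList(
    [FlaxTemperatureLogitsWarper(temperature=0.5), FlaxTopKLogitsWarper(3)]
)
scores = processors(input_ids, scores, cur_len=10)  # same call signature as in the tests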
| 661 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
lowerCamelCase__ : List[Any] = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
lowerCamelCase__ : Union[str, Any] = TaTokenizerFast
lowerCamelCase__ : Dict = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Tuple = [
'MT5EncoderModel',
'MT5ForConditionalGeneration',
'MT5ForQuestionAnswering',
'MT5Model',
'MT5PreTrainedModel',
'MT5Stack',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Tuple = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Tuple = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
lowerCamelCase__ : int = _LazyModule(
__name__,
globals()['__file__'],
_import_structure,
extra_objects={'MT5Tokenizer': MTaTokenizer, 'MT5TokenizerFast': MTaTokenizerFast},
module_spec=__spec__,
) | 31 | 0 |
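# --- Hedged sketch of the lazy-import pattern above ---------------------------
# The real implementation is transformers.utils._LazyModule; this is a minimal
# standalone illustration of the same idea: submodules are imported only when
# one of their attributes is first accessed.
import importlib
import types

class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(module_name), attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value

# usage: lazy = _TinyLazyModule("demo", {"json": ["dumps"]}); lazy.dumps({"a": 1})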
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {'openai-gpt': 'https://huggingface.co/openai-gpt/resolve/main/config.json'}
class a__ ( _SCREAMING_SNAKE_CASE ):
lowercase_ = "openai-gpt"
lowercase_ = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : Optional[int] , UpperCamelCase_ : Union[str, Any]=40478 , UpperCamelCase_ : Union[str, Any]=512 , UpperCamelCase_ : int=768 , UpperCamelCase_ : str=12 , UpperCamelCase_ : Optional[int]=12 , UpperCamelCase_ : List[Any]="gelu" , UpperCamelCase_ : List[str]=0.1 , UpperCamelCase_ : int=0.1 , UpperCamelCase_ : Optional[int]=0.1 , UpperCamelCase_ : Tuple=1e-5 , UpperCamelCase_ : Union[str, Any]=0.02 , UpperCamelCase_ : Any="cls_index" , UpperCamelCase_ : List[str]=True , UpperCamelCase_ : Dict=None , UpperCamelCase_ : List[Any]=True , UpperCamelCase_ : List[str]=0.1 , **UpperCamelCase_ : Optional[int] , ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = vocab_size
__UpperCAmelCase : List[str] = n_positions
__UpperCAmelCase : Union[str, Any] = n_embd
__UpperCAmelCase : str = n_layer
__UpperCAmelCase : Any = n_head
__UpperCAmelCase : List[Any] = afn
__UpperCAmelCase : Tuple = resid_pdrop
__UpperCAmelCase : Union[str, Any] = embd_pdrop
__UpperCAmelCase : Tuple = attn_pdrop
__UpperCAmelCase : List[str] = layer_norm_epsilon
__UpperCAmelCase : str = initializer_range
__UpperCAmelCase : Union[str, Any] = summary_type
__UpperCAmelCase : List[Any] = summary_use_proj
__UpperCAmelCase : List[str] = summary_activation
__UpperCAmelCase : List[Any] = summary_first_dropout
__UpperCAmelCase : Optional[Any] = summary_proj_to_labels
super().__init__(**_lowerCAmelCase)
| 77 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@require_torch
def lowerCAmelCase_ ( self : int ):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late to change it from inside pytest - so we
        # change it while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
SCREAMING_SNAKE_CASE_ = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
SCREAMING_SNAKE_CASE_ = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
SCREAMING_SNAKE_CASE_ = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
SCREAMING_SNAKE_CASE_ = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(_lowerCAmelCase )
BertModel.from_pretrained(_lowerCAmelCase )
BertTokenizer.from_pretrained(_lowerCAmelCase )
pipeline(task='fill-mask' , model=_lowerCAmelCase )
# baseline - just load from_pretrained with normal network
SCREAMING_SNAKE_CASE_ = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
SCREAMING_SNAKE_CASE_ = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
SCREAMING_SNAKE_CASE_ = '1'
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def lowerCAmelCase_ ( self : Tuple ):
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
SCREAMING_SNAKE_CASE_ = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
SCREAMING_SNAKE_CASE_ = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
SCREAMING_SNAKE_CASE_ = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
SCREAMING_SNAKE_CASE_ = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(_lowerCAmelCase )
BertModel.from_pretrained(_lowerCAmelCase )
BertTokenizer.from_pretrained(_lowerCAmelCase )
pipeline(task='fill-mask' , model=_lowerCAmelCase )
# baseline - just load from_pretrained with normal network
SCREAMING_SNAKE_CASE_ = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
SCREAMING_SNAKE_CASE_ = self.get_env()
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def lowerCAmelCase_ ( self : List[str] ):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late to change it from inside pytest - so we
        # change it while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
SCREAMING_SNAKE_CASE_ = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
SCREAMING_SNAKE_CASE_ = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
SCREAMING_SNAKE_CASE_ = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
# baseline - just load from_pretrained with normal network
SCREAMING_SNAKE_CASE_ = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
SCREAMING_SNAKE_CASE_ = self.get_env()
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
# next emulate no network
SCREAMING_SNAKE_CASE_ = [sys.executable, '-c', '\n'.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
SCREAMING_SNAKE_CASE_ = '1'
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = '\nfrom transformers import pipeline\n '
SCREAMING_SNAKE_CASE_ = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
SCREAMING_SNAKE_CASE_ = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
SCREAMING_SNAKE_CASE_ = self.get_env()
SCREAMING_SNAKE_CASE_ = '1'
SCREAMING_SNAKE_CASE_ = [sys.executable, '-c', '\n'.join([load, mock, run] )]
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'You cannot infer task automatically within `pipeline` when using offline mode' , result.stderr.decode().replace('\n' , '' ) , )
@require_torch
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ = '\nfrom transformers import AutoModel\n '
SCREAMING_SNAKE_CASE_ = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '
# baseline - just load from_pretrained with normal network
SCREAMING_SNAKE_CASE_ = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
SCREAMING_SNAKE_CASE_ = self.get_env()
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
SCREAMING_SNAKE_CASE_ = '1'
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() ) | 31 | 0 |
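# --- Standalone sketch of the core technique above ----------------------------
# TRANSFORMERS_OFFLINE is read once when `transformers` is imported, so the
# tests (and this sketch) flip it in a child interpreter's environment.
import os
import subprocess
import sys

child_code = "\n".join(
    [
        "from transformers import BertConfig",
        "BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert')",
        "print('success')",
    ]
)
env = {**os.environ, "TRANSFORMERS_OFFLINE": "1"}  # assumes the files are already cached
result = subprocess.run([sys.executable, "-c", child_code], env=env, capture_output=True)
print(result.returncode, result.stdout.decode())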
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class a ( _SCREAMING_SNAKE_CASE ):
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Dict = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_lowerCAmelCase , '''tf_padding''' ) )
self.parent.assertTrue(hasattr(_lowerCAmelCase , '''depth_multiplier''' ) )
class a :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=3 , _lowerCAmelCase=32 , _lowerCAmelCase=0.25 , _lowerCAmelCase=8 , _lowerCAmelCase=8 , _lowerCAmelCase=6 , _lowerCAmelCase=32 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase="relu6" , _lowerCAmelCase=1280 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.02 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=10 , _lowerCAmelCase=None , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: int = parent
__SCREAMING_SNAKE_CASE: str = batch_size
__SCREAMING_SNAKE_CASE: Tuple = num_channels
__SCREAMING_SNAKE_CASE: List[str] = image_size
__SCREAMING_SNAKE_CASE: List[str] = depth_multiplier
__SCREAMING_SNAKE_CASE: str = depth_divisible_by
__SCREAMING_SNAKE_CASE: Union[str, Any] = min_depth
__SCREAMING_SNAKE_CASE: Union[str, Any] = expand_ratio
__SCREAMING_SNAKE_CASE: Optional[int] = tf_padding
__SCREAMING_SNAKE_CASE: Tuple = output_stride
__SCREAMING_SNAKE_CASE: Tuple = first_layer_is_expansion
__SCREAMING_SNAKE_CASE: Optional[Any] = finegrained_output
__SCREAMING_SNAKE_CASE: List[str] = hidden_act
__SCREAMING_SNAKE_CASE: List[Any] = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
__SCREAMING_SNAKE_CASE: str = classifier_dropout_prob
__SCREAMING_SNAKE_CASE: List[str] = use_labels
__SCREAMING_SNAKE_CASE: Union[str, Any] = is_training
__SCREAMING_SNAKE_CASE: List[Any] = num_labels
__SCREAMING_SNAKE_CASE: List[Any] = initializer_range
__SCREAMING_SNAKE_CASE: Tuple = scope
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE: Dict = None
__SCREAMING_SNAKE_CASE: str = None
if self.use_labels:
__SCREAMING_SNAKE_CASE: Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
__SCREAMING_SNAKE_CASE: int = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__SCREAMING_SNAKE_CASE: List[str] = self.get_config()
return config, pixel_values, labels, pixel_labels
def snake_case_ ( self ):
"""simple docstring"""
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: str = MobileNetVaModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__SCREAMING_SNAKE_CASE: Tuple = model(_lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Dict = self.num_labels
__SCREAMING_SNAKE_CASE: List[str] = MobileNetVaForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__SCREAMING_SNAKE_CASE: Optional[Any] = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: List[Any] = self.num_labels
__SCREAMING_SNAKE_CASE: List[str] = MobileNetVaForSemanticSegmentation(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__SCREAMING_SNAKE_CASE: Union[str, Any] = model(_lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__SCREAMING_SNAKE_CASE: Optional[Any] = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Optional[Any] = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE: Dict = config_and_inputs
__SCREAMING_SNAKE_CASE: Tuple = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class a ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,unittest.TestCase ):
SCREAMING_SNAKE_CASE__ : Optional[int] = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
{
'''feature-extraction''': MobileNetVaModel,
'''image-classification''': MobileNetVaForImageClassification,
'''image-segmentation''': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ : str = False
SCREAMING_SNAKE_CASE__ : int = False
SCREAMING_SNAKE_CASE__ : Optional[int] = False
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Optional[int] = MobileNetVaModelTester(self )
__SCREAMING_SNAKE_CASE: Optional[Any] = MobileNetVaConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase )
def snake_case_ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV2 does not use inputs_embeds''' )
def snake_case_ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''MobileNetV2 does not support input and output embeddings''' )
def snake_case_ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''MobileNetV2 does not output attentions''' )
def snake_case_ ( self ):
"""simple docstring"""
pass
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE: int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE: List[str] = model_class(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE: Optional[Any] = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE: Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def snake_case_ ( self ):
"""simple docstring"""
def check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__SCREAMING_SNAKE_CASE: Optional[Any] = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
__SCREAMING_SNAKE_CASE: Union[str, Any] = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
__SCREAMING_SNAKE_CASE: str = outputs.hidden_states
__SCREAMING_SNAKE_CASE: str = 16
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE: int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE: str = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
            # check that output_hidden_states also works when enabled via the config
del inputs_dict["output_hidden_states"]
__SCREAMING_SNAKE_CASE: List[str] = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_lowerCAmelCase )
@slow
def snake_case_ ( self ):
"""simple docstring"""
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE: Tuple = MobileNetVaModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def lowerCAmelCase ( ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
@cached_property
def snake_case_ ( self ):
"""simple docstring"""
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v2_1.0_224''' ) if is_vision_available() else None
)
@slow
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Optional[Any] = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v2_1.0_224''' ).to(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Optional[Any] = self.default_image_processor
__SCREAMING_SNAKE_CASE: str = prepare_img()
__SCREAMING_SNAKE_CASE: List[Any] = image_processor(images=_lowerCAmelCase , return_tensors='''pt''' ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
__SCREAMING_SNAKE_CASE: str = model(**_lowerCAmelCase )
# verify the logits
__SCREAMING_SNAKE_CASE: Union[str, Any] = torch.Size((1, 1001) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Any = torch.tensor([0.2445, -1.1993, 0.1905] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
@slow
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Tuple = MobileNetVaForSemanticSegmentation.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' )
__SCREAMING_SNAKE_CASE: Dict = model.to(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: int = MobileNetVaImageProcessor.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' )
__SCREAMING_SNAKE_CASE: Optional[Any] = prepare_img()
__SCREAMING_SNAKE_CASE: List[str] = image_processor(images=_lowerCAmelCase , return_tensors='''pt''' ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
__SCREAMING_SNAKE_CASE: List[Any] = model(**_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Tuple = outputs.logits
# verify the logits
__SCREAMING_SNAKE_CASE: str = torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape , _lowerCAmelCase )
__SCREAMING_SNAKE_CASE: List[Any] = torch.tensor(
[
[[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
[[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
] , device=_lowerCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _lowerCAmelCase , atol=1e-4 ) )
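# --- Hedged end-to-end sketch of the slow tests above -------------------------
# Written with the upstream (non-obfuscated) class names; downloads the
# checkpoint and needs the COCO fixture image on first run.
import torch
from PIL import Image
from transformers import MobileNetV2ForImageClassification, MobileNetV2ImageProcessor

processor = MobileNetV2ImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224")
model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1001)
print(model.config.id2label[int(logits.argmax(-1))])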
| 202 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "M-CLIP"
def __init__( self : Tuple , _lowerCAmelCase : List[str]=1_024 , _lowerCAmelCase : str=768 , **_lowerCAmelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = transformerDimSize
SCREAMING_SNAKE_CASE_ = imageDimSize
super().__init__(**_lowerCAmelCase )
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = MCLIPConfig
def __init__( self : Dict , _lowerCAmelCase : Union[str, Any] , *_lowerCAmelCase : str , **_lowerCAmelCase : str ):
super().__init__(_lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = XLMRobertaModel(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.nn.Linear(
in_features=config.transformerDimensions , out_features=config.numDims )
def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = self.transformer(input_ids=_lowerCAmelCase , attention_mask=_lowerCAmelCase )[0]
SCREAMING_SNAKE_CASE_ = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(_lowerCAmelCase ), embs | 31 | 0 |
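# --- Standalone sketch of the masked mean pooling in forward() above ----------
import torch

embs = torch.randn(2, 5, 8)  # (batch, seq_len, hidden)
attention_mask = torch.tensor([[1, 1, 1, 0, 0],
                               [1, 1, 1, 1, 1]])
pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
print(pooled.shape)  # torch.Size([2, 8]) -- padded positions contribute nothing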
'''simple docstring'''
def _A ( lowercase__ , lowercase__ ):
lowercase__ = word.split()
def justify(lowercase__ , lowercase__ , lowercase__ ) -> str:
lowercase__ = max_width - width
lowercase__ = len(__UpperCAmelCase )
if len(__UpperCAmelCase ) == 1:
# if there is only word in line
# just insert overall_spaces_count for the remainder of line
return line[0] + " " * overall_spaces_count
else:
lowercase__ = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
lowercase__ = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
lowercase__ = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(__UpperCAmelCase ):
num_spaces_between_words_list[i] += 1
lowercase__ = []
for i in range(__UpperCAmelCase ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * """ """ )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(__UpperCAmelCase )
lowercase__ = []
lowercase__ = []
lowercase__ = 0
for word in words:
if width + len(__UpperCAmelCase ) + len(__UpperCAmelCase ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(__UpperCAmelCase )
width += len(__UpperCAmelCase )
else:
# justify the line and add it to result
answer.append(justify(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) )
# reset new line and new width
lowercase__ , lowercase__ = [word], len(__UpperCAmelCase )
lowercase__ = max_width - width - len(__UpperCAmelCase )
answer.append(""" """.join(__UpperCAmelCase ) + (remaining_spaces + 1) * """ """ )
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
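# --- Worked example (hedged; uses the obfuscated name `_A` defined above) -----
# _A("The quick brown fox jumps over the lazy dog.", 16) is expected to return
# ['The  quick brown', 'fox  jumps  over', 'the lazy dog.   ']
# extra spaces are distributed round-robin to the leftmost gaps, and the final
# line is left-justified and padded to max_width.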
| 325 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = (32, 32)
SCREAMING_SNAKE_CASE_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_lowerCAmelCase )
return image
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
return model
@property
def lowerCAmelCase_ ( self : Tuple ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def lowerCAmelCase_ ( self : Optional[int] ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_006 , )
return RobertaSeriesModelWithTransformation(_lowerCAmelCase )
@property
def lowerCAmelCase_ ( self : List[Any] ):
def extract(*_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : str ):
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : str ):
SCREAMING_SNAKE_CASE_ = torch.ones([0] )
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : int ):
self.pixel_values.to(_lowerCAmelCase )
return self
return Out()
return extract
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_ = self.dummy_cond_unet
SCREAMING_SNAKE_CASE_ = PNDMScheduler(skip_prk_steps=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.dummy_vae
SCREAMING_SNAKE_CASE_ = self.dummy_text_encoder
SCREAMING_SNAKE_CASE_ = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
SCREAMING_SNAKE_CASE_ = 77
SCREAMING_SNAKE_CASE_ = self.dummy_image.to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = init_image / 2 + 0.5
        # make sure that the PNDM scheduler skips the PRK steps
SCREAMING_SNAKE_CASE_ = AltDiffusionImgaImgPipeline(
unet=_lowerCAmelCase , scheduler=_lowerCAmelCase , vae=_lowerCAmelCase , text_encoder=_lowerCAmelCase , tokenizer=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = alt_pipe.to(_lowerCAmelCase )
alt_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = 'A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE_ = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ = alt_pipe(
[prompt] , generator=_lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = output.images
SCREAMING_SNAKE_CASE_ = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ = alt_pipe(
[prompt] , generator=_lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=_lowerCAmelCase , return_dict=_lowerCAmelCase , )[0]
SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def lowerCAmelCase_ ( self : Tuple ):
SCREAMING_SNAKE_CASE_ = self.dummy_cond_unet
SCREAMING_SNAKE_CASE_ = PNDMScheduler(skip_prk_steps=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.dummy_vae
SCREAMING_SNAKE_CASE_ = self.dummy_text_encoder
SCREAMING_SNAKE_CASE_ = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
SCREAMING_SNAKE_CASE_ = 77
SCREAMING_SNAKE_CASE_ = self.dummy_image.to(_lowerCAmelCase )
# put models in fp16
SCREAMING_SNAKE_CASE_ = unet.half()
SCREAMING_SNAKE_CASE_ = vae.half()
SCREAMING_SNAKE_CASE_ = bert.half()
        # make sure that the PNDM scheduler skips the PRK steps
SCREAMING_SNAKE_CASE_ = AltDiffusionImgaImgPipeline(
unet=_lowerCAmelCase , scheduler=_lowerCAmelCase , vae=_lowerCAmelCase , text_encoder=_lowerCAmelCase , tokenizer=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = alt_pipe.to(_lowerCAmelCase )
alt_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = 'A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = alt_pipe(
[prompt] , generator=_lowerCAmelCase , num_inference_steps=2 , output_type='np' , image=_lowerCAmelCase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
# resize to resolution that is divisible by 8 but not 16 or 32
SCREAMING_SNAKE_CASE_ = init_image.resize((760, 504) )
SCREAMING_SNAKE_CASE_ = 'BAAI/AltDiffusion'
SCREAMING_SNAKE_CASE_ = AltDiffusionImgaImgPipeline.from_pretrained(
_lowerCAmelCase , safety_checker=_lowerCAmelCase , )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE_ = 'A fantasy landscape, trending on artstation'
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe(
prompt=_lowerCAmelCase , image=_lowerCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=_lowerCAmelCase , output_type='np' , )
SCREAMING_SNAKE_CASE_ = output.images[0]
SCREAMING_SNAKE_CASE_ = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self : int ):
SCREAMING_SNAKE_CASE_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
SCREAMING_SNAKE_CASE_ = init_image.resize((768, 512) )
SCREAMING_SNAKE_CASE_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy' )
SCREAMING_SNAKE_CASE_ = 'BAAI/AltDiffusion'
SCREAMING_SNAKE_CASE_ = AltDiffusionImgaImgPipeline.from_pretrained(
_lowerCAmelCase , safety_checker=_lowerCAmelCase , )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE_ = 'A fantasy landscape, trending on artstation'
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe(
prompt=_lowerCAmelCase , image=_lowerCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=_lowerCAmelCase , output_type='np' , )
SCREAMING_SNAKE_CASE_ = output.images[0]
assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so compare via the max absolute error here
assert np.abs(expected_image - image ).max() < 1E-2 | 31 | 0 |
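# --- Hedged usage sketch of the pipeline exercised above -----------------------
# Written with the upstream class name (AltDiffusionImg2ImgPipeline); downloads
# BAAI/AltDiffusion on first run and assumes a CUDA device is available.
import torch
from diffusers import AltDiffusionImg2ImgPipeline
from diffusers.utils import load_image

pipe = AltDiffusionImg2ImgPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
pipe = pipe.to("cuda")
init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/img2img/sketch-mountains-input.jpg"
).resize((768, 512))
image = pipe(
    prompt="A fantasy landscape, trending on artstation",
    image=init_image,
    strength=0.75,
    guidance_scale=7.5,
    generator=torch.manual_seed(0),
).images[0]
image.save("fantasy_landscape.png")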
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class __a( unittest.TestCase ):
"""simple docstring"""
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> int:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] ,model_result['''ss'''] ):
UpperCAmelCase_ : Dict = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(_lowerCAmelCase )
def a__ ( self ) -> str:
UpperCAmelCase_ : Optional[Any] = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,)
UpperCAmelCase_ : Optional[Any] = TensorFlowBenchmark(_lowerCAmelCase )
UpperCAmelCase_ : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a__ ( self ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = '''sgugger/tiny-distilbert-classification'''
UpperCAmelCase_ : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,only_pretrain_model=_lowerCAmelCase ,)
UpperCAmelCase_ : Any = TensorFlowBenchmark(_lowerCAmelCase )
UpperCAmelCase_ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : int = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,)
UpperCAmelCase_ : Optional[Any] = TensorFlowBenchmark(_lowerCAmelCase )
UpperCAmelCase_ : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : int = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ : Dict = AutoConfig.from_pretrained(_lowerCAmelCase )
UpperCAmelCase_ : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,)
UpperCAmelCase_ : List[Any] = TensorFlowBenchmark(_lowerCAmelCase ,[config] )
UpperCAmelCase_ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : Tuple = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ : Optional[int] = AutoConfig.from_pretrained(_lowerCAmelCase )
UpperCAmelCase_ : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,)
UpperCAmelCase_ : List[str] = TensorFlowBenchmark(_lowerCAmelCase ,[config] )
UpperCAmelCase_ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a__ ( self ) -> int:
UpperCAmelCase_ : str = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,)
UpperCAmelCase_ : Any = TensorFlowBenchmark(_lowerCAmelCase )
UpperCAmelCase_ : str = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def a__ ( self ) -> int:
UpperCAmelCase_ : Union[str, Any] = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ : Dict = AutoConfig.from_pretrained(_lowerCAmelCase )
UpperCAmelCase_ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,)
UpperCAmelCase_ : int = TensorFlowBenchmark(_lowerCAmelCase ,[config] )
UpperCAmelCase_ : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def a__ ( self ) -> int:
UpperCAmelCase_ : Any = '''patrickvonplaten/t5-tiny-random'''
UpperCAmelCase_ : Dict = AutoConfig.from_pretrained(_lowerCAmelCase )
UpperCAmelCase_ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,)
UpperCAmelCase_ : Any = TensorFlowBenchmark(_lowerCAmelCase ,configs=[config] )
UpperCAmelCase_ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('''GPU''' ) ) == 0 ,'''Cannot do xla on CPU.''' )
def a__ ( self ) -> Tuple:
UpperCAmelCase_ : Optional[Any] = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,use_xla=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,)
UpperCAmelCase_ : Optional[int] = TensorFlowBenchmark(_lowerCAmelCase )
UpperCAmelCase_ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : int = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,inference=_lowerCAmelCase ,save_to_csv=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(_lowerCAmelCase ,'''inf_time.csv''' ) ,inference_memory_csv_file=os.path.join(_lowerCAmelCase ,'''inf_mem.csv''' ) ,env_info_csv_file=os.path.join(_lowerCAmelCase ,'''env.csv''' ) ,multi_process=_lowerCAmelCase ,)
UpperCAmelCase_ : Optional[int] = TensorFlowBenchmark(_lowerCAmelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(_lowerCAmelCase ,'''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCAmelCase ,'''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCAmelCase ,'''env.csv''' ) ).exists() )
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : List[str] = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(_SCREAMING_SNAKE_CASE ):
self.assertTrue(hasattr(_lowerCAmelCase ,'''sequential''' ) )
self.assertTrue(hasattr(_lowerCAmelCase ,'''cumulative''' ) )
self.assertTrue(hasattr(_lowerCAmelCase ,'''current''' ) )
self.assertTrue(hasattr(_lowerCAmelCase ,'''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(_lowerCAmelCase ,'''log.txt''' ) ,log_print=_lowerCAmelCase ,trace_memory_line_by_line=_lowerCAmelCase ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,)
UpperCAmelCase_ : Union[str, Any] = TensorFlowBenchmark(_lowerCAmelCase )
UpperCAmelCase_ : str = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(_lowerCAmelCase ,'''log.txt''' ) ).exists() ) | 30 |
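# --- Hedged usage sketch of the benchmark API exercised above ------------------
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    inference=True,
    training=False,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = TensorFlowBenchmark(args).run()
print(results.time_inference_result)
print(results.memory_inference_result)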
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
lowerCamelCase__ : Optional[int] = logging.get_logger(__name__)
lowerCamelCase__ : Dict = {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json',
'allenai/longformer-large-4096': 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json',
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'
),
}
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "longformer"
def __init__( self : Union[str, Any] , _lowerCAmelCase : Union[List[int], int] = 512 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 1 , _lowerCAmelCase : int = 0 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 30_522 , _lowerCAmelCase : int = 768 , _lowerCAmelCase : int = 12 , _lowerCAmelCase : int = 12 , _lowerCAmelCase : int = 3_072 , _lowerCAmelCase : str = "gelu" , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : int = 512 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : float = 0.02 , _lowerCAmelCase : float = 1E-12 , _lowerCAmelCase : bool = False , **_lowerCAmelCase : Union[str, Any] , ):
super().__init__(pad_token_id=_lowerCAmelCase , **_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = attention_window
SCREAMING_SNAKE_CASE_ = sep_token_id
SCREAMING_SNAKE_CASE_ = bos_token_id
SCREAMING_SNAKE_CASE_ = eos_token_id
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = type_vocab_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = onnx_export
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Optional[Any] , _lowerCAmelCase : "PretrainedConfig" , _lowerCAmelCase : str = "default" , _lowerCAmelCase : "List[PatchingSpec]" = None ):
super().__init__(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = True
@property
def lowerCAmelCase_ ( self : Any ):
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_ = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
SCREAMING_SNAKE_CASE_ = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('global_attention_mask', dynamic_axis),
] )
@property
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ = super().outputs
if self.task == "default":
SCREAMING_SNAKE_CASE_ = {0: 'batch'}
return outputs
@property
def lowerCAmelCase_ ( self : str ):
return 1E-4
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14 )
def lowerCAmelCase_ ( self : str , _lowerCAmelCase : "PreTrainedTokenizerBase" , _lowerCAmelCase : int = -1 , _lowerCAmelCase : int = -1 , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[TensorType] = None , ):
SCREAMING_SNAKE_CASE_ = super().generate_dummy_inputs(
preprocessor=_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase )
import torch
        # for some reason, replacing this code with inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
SCREAMING_SNAKE_CASE_ = torch.zeros_like(inputs['input_ids'] )
# make every second token global
SCREAMING_SNAKE_CASE_ = 1
return inputs | 31 | 0 |
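# --- Standalone sketch of the dummy-input trick above --------------------------
# The obfuscated assignment hides the slicing; upstream it marks every second
# token as global via `inputs["global_attention_mask"][:, ::2] = 1`:
import torch

input_ids = torch.randint(0, 30_522, (2, 8))
global_attention_mask = torch.zeros_like(input_ids)
global_attention_mask[:, ::2] = 1  # every second token attends globally
print(global_attention_mask[0])    # tensor([1, 0, 1, 0, 1, 0, 1, 0])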
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class lowercase_ :
@property
def _lowerCAmelCase ( self : Optional[Any] ):
return self.get_dummy_input()
@property
def _lowerCAmelCase ( self : Union[str, Any] ):
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'." )
def _lowerCAmelCase ( self : List[str] , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : str=False , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Dict=False , ):
snake_case__ : Union[str, Any] = 4
snake_case__ : Optional[Any] = 32
snake_case__ : Tuple = (32, 32)
snake_case__ : int = torch.manual_seed(0 )
snake_case__ : List[Any] = torch.device(_lowerCAmelCase )
snake_case__ : Any = (batch_size, num_channels) + sizes
snake_case__ : Union[str, Any] = randn_tensor(_lowerCAmelCase , generator=_lowerCAmelCase , device=_lowerCAmelCase )
snake_case__ : Any = {'hidden_states': hidden_states}
if include_temb:
snake_case__ : Tuple = 128
snake_case__ : int = randn_tensor((batch_size, temb_channels) , generator=_lowerCAmelCase , device=_lowerCAmelCase )
if include_res_hidden_states_tuple:
snake_case__ : Any = torch.manual_seed(1 )
snake_case__ : List[str] = (randn_tensor(_lowerCAmelCase , generator=_lowerCAmelCase , device=_lowerCAmelCase ),)
if include_encoder_hidden_states:
snake_case__ : str = floats_tensor((batch_size, 32, 32) ).to(_lowerCAmelCase )
if include_skip_sample:
snake_case__ : Optional[Any] = randn_tensor(((batch_size, 3) + sizes) , generator=_lowerCAmelCase , device=_lowerCAmelCase )
return dummy_input
def _lowerCAmelCase ( self : Optional[int] ):
snake_case__ : Dict = {
'in_channels': 32,
'out_channels': 32,
'temb_channels': 128,
}
if self.block_type == "up":
snake_case__ : Any = 32
if self.block_type == "mid":
init_dict.pop('out_channels' )
snake_case__ : Dict = self.dummy_input
return init_dict, inputs_dict
def _lowerCAmelCase ( self : Dict , __lowerCamelCase : Optional[Any] ):
snake_case__ , snake_case__ : Optional[int] = self.prepare_init_args_and_inputs_for_common()
snake_case__ : Union[str, Any] = self.block_class(**_lowerCAmelCase )
unet_block.to(_lowerCAmelCase )
unet_block.eval()
with torch.no_grad():
snake_case__ : List[str] = unet_block(**_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
snake_case__ : Tuple = output[0]
self.assertEqual(output.shape , self.output_shape )
snake_case__ : int = output[0, -1, -3:, -3:]
snake_case__ : int = torch.tensor(_lowerCAmelCase ).to(_lowerCAmelCase )
assert torch_all_close(output_slice.flatten() , _lowerCAmelCase , atol=5E-3 )
@unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' )
def _lowerCAmelCase ( self : Any ):
snake_case__ , snake_case__ : Optional[Any] = self.prepare_init_args_and_inputs_for_common()
snake_case__ : int = self.block_class(**_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.train()
snake_case__ : Optional[Any] = model(**_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
snake_case__ : Tuple = output[0]
snake_case__ : Dict = torch.device(_lowerCAmelCase )
snake_case__ : str = randn_tensor(output.shape , device=_lowerCAmelCase )
snake_case__ : Optional[int] = torch.nn.functional.mse_loss(_lowerCAmelCase , _lowerCAmelCase )
loss.backward()
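# --- Minimal standalone sketch of the training smoke test above ----------------
# Same recipe on a plain conv block: forward, MSE against random noise, backward.
import torch

block = torch.nn.Conv2d(32, 32, kernel_size=3, padding=1)
hidden_states = torch.randn(4, 32, 16, 16)
output = block(hidden_states)
loss = torch.nn.functional.mse_loss(output, torch.randn_like(output))
loss.backward()  # verifies gradients flow through the block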
| 270 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
lowerCamelCase__ : Optional[int] = logging.get_logger(__name__)
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : str , *_lowerCAmelCase : Tuple , **_lowerCAmelCase : int ):
warnings.warn(
'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use MobileViTImageProcessor instead.' , _lowerCAmelCase , )
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase ) | 31 | 0 |
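# --- Hedged sketch of the recommended replacement ------------------------------
# The deprecation above points users at the image processor instead:
from transformers import MobileViTImageProcessor

processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")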
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _a (metaclass=_SCREAMING_SNAKE_CASE):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ['transformers', 'torch', 'note_seq']
def __init__( self , *A__ , **A__ ) -> Optional[int]:
requires_backends(self , ["""transformers""", """torch""", """note_seq"""] )
@classmethod
def UpperCamelCase ( cls , *A__ , **A__ ) -> int:
requires_backends(cls , ["""transformers""", """torch""", """note_seq"""] )
@classmethod
def UpperCamelCase ( cls , *A__ , **A__ ) -> Tuple:
requires_backends(cls , ["""transformers""", """torch""", """note_seq"""] )
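# --- Behavior sketch (hedged) ---------------------------------------------------
# With any of the named backends missing, every use of the stub above raises an
# ImportError through requires_backends, e.g. (class name obfuscated to `_a` here):
#   _a()  ->  ImportError: ... requires the transformers, torch and note_seq libraries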
| 591 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Optional[int] = logging.get_logger(__name__)
lowerCamelCase__ : Tuple = {
'microsoft/swinv2-tiny-patch4-window8-256': (
'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
),
}
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "swinv2"
lowercase_ = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Dict , _lowerCAmelCase : Optional[Any]=224 , _lowerCAmelCase : Optional[int]=4 , _lowerCAmelCase : Tuple=3 , _lowerCAmelCase : Tuple=96 , _lowerCAmelCase : Dict=[2, 2, 6, 2] , _lowerCAmelCase : Optional[Any]=[3, 6, 12, 24] , _lowerCAmelCase : str=7 , _lowerCAmelCase : List[Any]=4.0 , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : List[Any]=0.0 , _lowerCAmelCase : List[Any]=0.0 , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : List[Any]="gelu" , _lowerCAmelCase : str=False , _lowerCAmelCase : str=0.02 , _lowerCAmelCase : List[Any]=1E-5 , _lowerCAmelCase : str=32 , **_lowerCAmelCase : List[Any] , ):
super().__init__(**_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = patch_size
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = embed_dim
SCREAMING_SNAKE_CASE_ = depths
SCREAMING_SNAKE_CASE_ = len(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = num_heads
SCREAMING_SNAKE_CASE_ = window_size
SCREAMING_SNAKE_CASE_ = mlp_ratio
SCREAMING_SNAKE_CASE_ = qkv_bias
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = drop_path_rate
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = use_absolute_embeddings
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
SCREAMING_SNAKE_CASE_ = int(embed_dim * 2 ** (len(_lowerCAmelCase ) - 1) )
SCREAMING_SNAKE_CASE_ = (0, 0, 0, 0) | 31 | 0 |
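# Worked example of the channel-dimension formula above, using this config's
# defaults (embed_dim=96, depths=[2, 2, 6, 2], i.e. 4 stages): the last stage
# has int(96 * 2 ** (4 - 1)) == 768 channels -- each downsampling stage doubles
# the embedding width, so hidden_size is 768 for the tiny variant.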
from __future__ import annotations
from collections import namedtuple
def electric_power( voltage : float , current : float , power : float ) -> tuple:
    # namedtuple gives the result a readable .name / .value pair
    result = namedtuple('result' , 'name value' )
if (voltage, current, power).count(0 ) != 1:
raise ValueError('Only one argument must be 0' )
elif power < 0:
raise ValueError(
'Power cannot be negative in any electrical/electronics system' )
elif voltage == 0:
return result('voltage' , power / current )
elif current == 0:
return result('current' , power / voltage )
elif power == 0:
return result('power' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
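# A short usage sketch for the function above (example values are illustrative):
if __name__ == "__main__":
    print(electric_power(voltage=0, current=2, power=5))   # result(name='voltage', value=2.5)
    print(electric_power(voltage=2, current=2, power=0))   # result(name='power', value=4.0)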
| 417 |
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[str] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any]=7 , _lowerCAmelCase : Union[str, Any]=400 , _lowerCAmelCase : Tuple=2_000 , _lowerCAmelCase : str=1 , _lowerCAmelCase : int=0.0 , _lowerCAmelCase : Optional[Any]=16_000 , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Any=80 , _lowerCAmelCase : Union[str, Any]=16 , _lowerCAmelCase : List[str]=64 , _lowerCAmelCase : List[Any]="hann_window" , _lowerCAmelCase : Any=80 , _lowerCAmelCase : List[Any]=7_600 , _lowerCAmelCase : List[Any]=1E-10 , _lowerCAmelCase : Optional[Any]=True , ):
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = min_seq_length
SCREAMING_SNAKE_CASE_ = max_seq_length
SCREAMING_SNAKE_CASE_ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE_ = feature_size
SCREAMING_SNAKE_CASE_ = padding_value
SCREAMING_SNAKE_CASE_ = sampling_rate
SCREAMING_SNAKE_CASE_ = do_normalize
SCREAMING_SNAKE_CASE_ = num_mel_bins
SCREAMING_SNAKE_CASE_ = hop_length
SCREAMING_SNAKE_CASE_ = win_length
SCREAMING_SNAKE_CASE_ = win_function
SCREAMING_SNAKE_CASE_ = fmin
SCREAMING_SNAKE_CASE_ = fmax
SCREAMING_SNAKE_CASE_ = mel_floor
SCREAMING_SNAKE_CASE_ = return_attention_mask
def lowerCAmelCase_ ( self : Union[str, Any] ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : Optional[int]=False , _lowerCAmelCase : str=False ):
def _flatten(_lowerCAmelCase : Dict ):
return list(itertools.chain(*_lowerCAmelCase ) )
if equal_length:
SCREAMING_SNAKE_CASE_ = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE_ = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE_ = [np.asarray(_lowerCAmelCase ) for x in speech_inputs]
return speech_inputs
def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : Union[str, Any]=False , _lowerCAmelCase : Optional[int]=False ):
if equal_length:
SCREAMING_SNAKE_CASE_ = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE_ = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE_ = [np.asarray(_lowerCAmelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = SpeechTaFeatureExtractor
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = SpeechTaFeatureExtractionTester(self )
def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : int ):
self.assertTrue(np.all(np.mean(_lowerCAmelCase , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(_lowerCAmelCase , axis=0 ) - 1 ) < 1E-3 ) )
def lowerCAmelCase_ ( self : List[Any] ):
# Tests that all call wrap to encode_plus and batch_encode_plus
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE_ = [np.asarray(_lowerCAmelCase ) for speech_input in speech_inputs]
# Test not batched input
SCREAMING_SNAKE_CASE_ = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE_ = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
# Test batched
SCREAMING_SNAKE_CASE_ = feat_extract(_lowerCAmelCase , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE_ = feat_extract(_lowerCAmelCase , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE_ = ['longest', 'max_length', 'do_not_pad']
SCREAMING_SNAKE_CASE_ = [None, 1_600, None]
for max_length, padding in zip(_lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = feat_extract(_lowerCAmelCase , padding=_lowerCAmelCase , max_length=_lowerCAmelCase , return_tensors='np' )
SCREAMING_SNAKE_CASE_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
self.assertTrue(input_values[0][1_000:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE_ = range(800 , 1_400 , 200 )
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in lengths]
SCREAMING_SNAKE_CASE_ = ['longest', 'max_length', 'do_not_pad']
SCREAMING_SNAKE_CASE_ = [None, 1_600, None]
for max_length, padding in zip(_lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = feat_extract(_lowerCAmelCase , max_length=_lowerCAmelCase , padding=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
def lowerCAmelCase_ ( self : Dict ):
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE_ = feat_extract(
_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=1_000 , padding='max_length' , return_tensors='np' )
SCREAMING_SNAKE_CASE_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def lowerCAmelCase_ ( self : Dict ):
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE_ = feat_extract(
_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=1_000 , padding='longest' , return_tensors='np' )
SCREAMING_SNAKE_CASE_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_000) )
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE_ = feat_extract(
_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=2_000 , padding='longest' , return_tensors='np' )
SCREAMING_SNAKE_CASE_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_200) )
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE_ = np.random.rand(100 ).astype(np.floataa )
SCREAMING_SNAKE_CASE_ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
SCREAMING_SNAKE_CASE_ = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
SCREAMING_SNAKE_CASE_ = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def lowerCAmelCase_ ( self : Tuple ):
# Tests that all call wrap to encode_plus and batch_encode_plus
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE_ = [np.asarray(_lowerCAmelCase ) for speech_input in speech_inputs]
# Test feature size
SCREAMING_SNAKE_CASE_ = feature_extractor(audio_target=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors='np' ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
SCREAMING_SNAKE_CASE_ = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE_ = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
# Test batched
SCREAMING_SNAKE_CASE_ = feature_extractor(_lowerCAmelCase , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE_ = feature_extractor(_lowerCAmelCase , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
SCREAMING_SNAKE_CASE_ = np.asarray(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = feature_extractor(_lowerCAmelCase , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE_ = feature_extractor(_lowerCAmelCase , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE_ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_lowerCAmelCase ) == len(_lowerCAmelCase ) for x, y in zip(_lowerCAmelCase , processed_features[input_name] ) ) )
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
SCREAMING_SNAKE_CASE_ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE_ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def lowerCAmelCase_ ( self : Dict ):
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE_ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
SCREAMING_SNAKE_CASE_ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE_ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def lowerCAmelCase_ ( self : Dict ):
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE_ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE_ = feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE_ = feat_extract.pad(_lowerCAmelCase , padding='longest' , return_tensors='np' )[input_name]
SCREAMING_SNAKE_CASE_ = feat_extract.pad(_lowerCAmelCase , padding='longest' , return_tensors='pt' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ = self.feat_extract_dict
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE_ = [len(_lowerCAmelCase ) for x in speech_inputs]
SCREAMING_SNAKE_CASE_ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE_ = feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE_ = feat_extract.pad(_lowerCAmelCase , padding='longest' , return_tensors='np' )
self.assertIn('attention_mask' , _lowerCAmelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _lowerCAmelCase )
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = self.feat_extract_dict
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE_ = [len(_lowerCAmelCase ) for x in speech_inputs]
SCREAMING_SNAKE_CASE_ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE_ = min(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE_ = feat_extract.pad(
_lowerCAmelCase , padding='max_length' , max_length=_lowerCAmelCase , truncation=_lowerCAmelCase , return_tensors='np' )
self.assertIn('attention_mask' , _lowerCAmelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : Tuple ):
from datasets import load_dataset
SCREAMING_SNAKE_CASE_ = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
SCREAMING_SNAKE_CASE_ = ds.sort('id' ).select(range(_lowerCAmelCase ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def lowerCAmelCase_ ( self : Any ):
# fmt: off
SCREAMING_SNAKE_CASE_ = torch.tensor(
[2.3_804E-03, 2.0_752E-03, 1.9_836E-03, 2.1_057E-03, 1.6_174E-03,
3.0_518E-04, 9.1_553E-05, 3.3_569E-04, 9.7_656E-04, 1.8_311E-03,
2.0_142E-03, 2.1_057E-03, 1.7_395E-03, 4.5_776E-04, -3.9_673E-04,
4.5_776E-04, 1.0_071E-03, 9.1_553E-05, 4.8_828E-04, 1.1_597E-03,
7.3_242E-04, 9.4_604E-04, 1.8_005E-03, 1.8_311E-03, 8.8_501E-04,
4.2_725E-04, 4.8_828E-04, 7.3_242E-04, 1.0_986E-03, 2.1_057E-03] )
# fmt: on
SCREAMING_SNAKE_CASE_ = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE_ = SpeechTaFeatureExtractor()
SCREAMING_SNAKE_CASE_ = feature_extractor(_lowerCAmelCase , return_tensors='pt' ).input_values
self.assertEquals(input_values.shape , (1, 93_680) )
self.assertTrue(torch.allclose(input_values[0, :30] , _lowerCAmelCase , atol=1E-6 ) )
def lowerCAmelCase_ ( self : Optional[int] ):
# fmt: off
SCREAMING_SNAKE_CASE_ = torch.tensor(
[-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
-3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
-3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
-3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998] )
# fmt: on
SCREAMING_SNAKE_CASE_ = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE_ = SpeechTaFeatureExtractor()
SCREAMING_SNAKE_CASE_ = feature_extractor(audio_target=_lowerCAmelCase , return_tensors='pt' ).input_values
self.assertEquals(input_values.shape , (1, 366, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , _lowerCAmelCase , atol=1E-4 ) ) | 31 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'],
'processing_layoutlmv2': ['LayoutLMv2Processor'],
'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutlmv2_fast'] = ['LayoutLMv2TokenizerFast']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_layoutlmv2'] = ['LayoutLMv2FeatureExtractor']
    _import_structure['image_processing_layoutlmv2'] = ['LayoutLMv2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_layoutlmv2'] = [
'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv2ForQuestionAnswering',
'LayoutLMv2ForSequenceClassification',
'LayoutLMv2ForTokenClassification',
'LayoutLMv2Layer',
'LayoutLMv2Model',
'LayoutLMv2PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 351 |
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int
def all_rotations(s: str) -> list[str]:
    if not isinstance(s, str):
        raise TypeError('The parameter s type must be str.')
    return [s[i:] + s[:i] for i in range(len(s))]
def bwt_transform(s: str) -> BWTTransformDict:
    if not isinstance(s, str):
        raise TypeError('The parameter s type must be str.')
    if not s:
        raise ValueError('The parameter s must not be empty.')
    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response
def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    if not isinstance(bwt_string, str):
        raise TypeError('The parameter bwt_string type must be str.')
    if not bwt_string:
        raise ValueError('The parameter bwt_string must not be empty.')
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            'The parameter idx_original_string type must be int or passive'
            ' of cast to int.')
    if idx_original_string < 0:
        raise ValueError('The parameter idx_original_string must not be lower than 0.')
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            'The parameter idx_original_string must be lower than' ' len(bwt_string).')
    # repeatedly prepend the BWT column and re-sort to rebuild the rotations
    ordered_rotations = [''] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
    entry_msg = 'Provide a string that I will generate its BWT transform: '
    s = input(entry_msg).strip()
    result = bwt_transform(s)
print(
f'''Burrows Wheeler transform for string \'{s}\' results '''
f'''in \'{result['bwt_string']}\''''
)
    original_string = reverse_bwt(result['bwt_string'], result['idx_original_string'])
print(
f'''Reversing Burrows Wheeler transform for entry \'{result['bwt_string']}\' '''
f'''we get original string \'{original_string}\''''
) | 31 | 0 |
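# A deterministic round-trip check, using the classic "^BANANA" example from
# the BWT literature (expected values verified by hand):
if __name__ == "__main__":
    transform = bwt_transform("^BANANA")
    assert transform["bwt_string"] == "BNN^AAA"
    assert reverse_bwt(transform["bwt_string"], transform["idx_original_string"]) == "^BANANA"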
'''simple docstring'''
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    """
    Return the twin prime of number (number + 2) if both are prime,
    otherwise -1.
    """
    if not isinstance(number, int):
        msg = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
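# Illustrative calls (5 and 7 are twin primes; 4 is not prime at all):
if __name__ == "__main__":
    print(twin_prime(5))   # 7
    print(twin_prime(4))   # -1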
| 161 |
class Graph:
    '''simple docstring'''
    def __init__(self):
        # adjacency list: vertex -> list of neighbours
        self.vertex = {}
    def print_graph(self):
        print(self.vertex )
        for i in self.vertex:
            print(i , ' -> ' , ' -> '.join([str(j) for j in self.vertex[i]] ) )
    def add_edge(self, from_vertex: int, to_vertex: int):
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex )
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]
    def dfs(self):
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex )
        # call the recursive helper function
        for i in range(len(self.vertex ) ):
            if not visited[i]:
                self.dfs_recursive(i , visited )
    def dfs_recursive(self, start_vertex: int, visited: list):
        # mark start vertex as visited
        visited[start_vertex] = True
        print(start_vertex , end=' ' )
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i , visited )
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3 | 31 | 0 |
"""simple docstring"""
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8  # flag that lifts arrow-key codes out of the 8-bit range
KEYMAP = {
    'tab': ord("""\t"""),
    'newline': ord("""\r"""),
    'esc': 2_7,
    'up': 6_5 + ARROW_KEY_FLAG,
    'down': 6_6 + ARROW_KEY_FLAG,
    'right': 6_7 + ARROW_KEY_FLAG,
    'left': 6_8 + ARROW_KEY_FLAG,
    'mod_int': 9_1,
    'undefined': sys.maxsize,
    'interrupt': 3,
    'insert': 5_0,
    'delete': 5_1,
    'pg_up': 5_3,
    'pg_down': 5_4,
}
KEYMAP['arrow_begin'] = KEYMAP['up']
KEYMAP['arrow_end'] = KEYMAP['left']
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG,
        b'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG,
        b'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG,
        b'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG,
        b'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG,
        b'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG,
        b'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG,
        b'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG,
    }
for i in range(1_0):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Gets raw characters from inputs"""
    if os.name == "nt":
        import msvcrt
        encoding = 'mbcs'
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2] )
                    WIN_CH_BUFFER.append(chr(KEYMAP['mod_int'] ) )
                    WIN_CH_BUFFER.append(chx )
                    if ord(chx ) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126 ) )
                    ch = chr(KEYMAP['esc'] )
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding )
        else:
            ch = WIN_CH_BUFFER.pop(0 )
    elif os.name == "posix":
        import termios
        import tty
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd )
        try:
            tty.setraw(fd )
            ch = sys.stdin.read(1 )
        finally:
            termios.tcsetattr(fd , termios.TCSADRAIN , old_settings )
    return ch
def get_character():
    """Gets a character from the keyboard and returns the key code"""
    char = get_raw_chars()
    if ord(char ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char ) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo ) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key ) + ARROW_KEY_FLAG )
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
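# Note on the encoding above: arrow keys are reported as
# chr(ord(letter) + ARROW_KEY_FLAG), i.e. shifted past the 8-bit range so they
# can never collide with a printable character. For example, "up" is
# 65 + (1 << 8) and chr(KEYMAP['up'] - ARROW_KEY_FLAG) == 'A'.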
| 650 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : str = logging.get_logger(__name__)
lowerCamelCase__ : Tuple = {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json',
'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json',
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json',
'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json',
}
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "funnel"
lowercase_ = {
"hidden_size": "d_model",
"num_attention_heads": "n_head",
}
def __init__( self : int , _lowerCAmelCase : Optional[int]=30_522 , _lowerCAmelCase : List[str]=[4, 4, 4] , _lowerCAmelCase : Tuple=None , _lowerCAmelCase : Optional[int]=2 , _lowerCAmelCase : int=768 , _lowerCAmelCase : Optional[Any]=12 , _lowerCAmelCase : Optional[Any]=64 , _lowerCAmelCase : Optional[Any]=3_072 , _lowerCAmelCase : List[str]="gelu_new" , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : int=0.1 , _lowerCAmelCase : Tuple=0.0 , _lowerCAmelCase : List[Any]=0.1 , _lowerCAmelCase : Dict=None , _lowerCAmelCase : str=1E-9 , _lowerCAmelCase : Any="mean" , _lowerCAmelCase : Union[str, Any]="relative_shift" , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Dict=True , _lowerCAmelCase : Tuple=True , **_lowerCAmelCase : Optional[Any] , ):
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = block_sizes
SCREAMING_SNAKE_CASE_ = [1] * len(_lowerCAmelCase ) if block_repeats is None else block_repeats
assert len(_lowerCAmelCase ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
SCREAMING_SNAKE_CASE_ = num_decoder_layers
SCREAMING_SNAKE_CASE_ = d_model
SCREAMING_SNAKE_CASE_ = n_head
SCREAMING_SNAKE_CASE_ = d_head
SCREAMING_SNAKE_CASE_ = d_inner
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout
SCREAMING_SNAKE_CASE_ = attention_dropout
SCREAMING_SNAKE_CASE_ = activation_dropout
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = initializer_std
SCREAMING_SNAKE_CASE_ = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], F"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
SCREAMING_SNAKE_CASE_ = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], F"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
SCREAMING_SNAKE_CASE_ = attention_type
SCREAMING_SNAKE_CASE_ = separate_cls
SCREAMING_SNAKE_CASE_ = truncate_seq
SCREAMING_SNAKE_CASE_ = pool_q_only
super().__init__(**_lowerCAmelCase )
@property
def lowerCAmelCase_ ( self : Optional[int] ):
return sum(self.block_sizes )
@num_hidden_layers.setter
def lowerCAmelCase_ ( self : int , _lowerCAmelCase : List[Any] ):
raise NotImplementedError(
'This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.' )
@property
def lowerCAmelCase_ ( self : List[Any] ):
return len(self.block_sizes )
@num_blocks.setter
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : Union[str, Any] ):
raise NotImplementedError('This model does not support the setting of `num_blocks`. Please set `block_sizes`.' ) | 31 | 0 |
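# Worked example of the two derived properties above, using the default
# block_sizes=[4, 4, 4]: num_hidden_layers is sum([4, 4, 4]) == 12 and
# num_blocks is len([4, 4, 4]) == 3.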
"""simple docstring"""
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
('time_embed.0.weight', 'time_embedding.linear_1.weight'),
('time_embed.0.bias', 'time_embedding.linear_1.bias'),
('time_embed.2.weight', 'time_embedding.linear_2.weight'),
('time_embed.2.bias', 'time_embedding.linear_2.bias'),
('input_blocks.0.0.weight', 'conv_in.weight'),
('input_blocks.0.0.bias', 'conv_in.bias'),
('out.0.weight', 'conv_norm_out.weight'),
('out.0.bias', 'conv_norm_out.bias'),
('out.2.weight', 'conv_out.weight'),
('out.2.bias', 'conv_out.bias'),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
('in_layers.0', 'norm1'),
('in_layers.2', 'conv1'),
('out_layers.0', 'norm2'),
('out_layers.3', 'conv2'),
('emb_layers.1', 'time_emb_proj'),
('skip_connection', 'conv_shortcut'),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks

    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = F"""down_blocks.{i}.resnets.{j}."""
        sd_down_res_prefix = F"""input_blocks.{3*i + j + 1}.0."""
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = F"""down_blocks.{i}.attentions.{j}."""
            sd_down_atn_prefix = F"""input_blocks.{3*i + j + 1}.1."""
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = F"""up_blocks.{i}.resnets.{j}."""
        sd_up_res_prefix = F"""output_blocks.{3*i + j}.0."""
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = F"""up_blocks.{i}.attentions.{j}."""
            sd_up_atn_prefix = F"""output_blocks.{3*i + j}.1."""
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = F"""down_blocks.{i}.downsamplers.0.conv."""
        sd_downsample_prefix = F"""input_blocks.{3*(i+1)}.0.op."""
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = F"""up_blocks.{i}.upsamplers.0."""
        sd_upsample_prefix = F"""output_blocks.{3*i + 2}.{1 if i == 0 else 2}."""
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = 'mid_block.attentions.0.'
sd_mid_atn_prefix = 'middle_block.1.'
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
    hf_mid_res_prefix = F"""mid_block.resnets.{j}."""
    sd_mid_res_prefix = F"""middle_block.{2*j}."""
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    # buyer beware: this is a *brittle* function,
    # and correct output requires that all of these pieces interact in
    # the exact order in which I have arranged them.
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part , sd_part )
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part , sd_part )
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
('nin_shortcut', 'conv_shortcut'),
('norm_out', 'conv_norm_out'),
('mid.attn_1.', 'mid_block.attentions.0.'),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = F"""encoder.down_blocks.{i}.resnets.{j}."""
        sd_down_prefix = F"""encoder.down.{i}.block.{j}."""
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = F"""down_blocks.{i}.downsamplers.0."""
        sd_downsample_prefix = F"""down.{i}.downsample."""
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = F"""up_blocks.{i}.upsamplers.0."""
        sd_upsample_prefix = F"""up.{3-i}.upsample."""
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = F"""decoder.up_blocks.{i}.resnets.{j}."""
        sd_up_prefix = F"""decoder.up.{3-i}.block.{j}."""
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = F"""mid_block.resnets.{i}."""
    sd_mid_res_prefix = F"""mid.block_{i+1}."""
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
('norm.', 'group_norm.'),
('q.', 'query.'),
('k.', 'key.'),
('v.', 'value.'),
('proj_out.', 'proj_attn.'),
]
def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape , 1 , 1 )
def convert_vae_state_dict(vae_state_dict):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part , sd_part )
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part , sd_part )
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ['''q''', '''k''', '''v''', '''proj_out''']
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if F"""mid.attn_1.{weight_name}.weight""" in k:
                print(F"""Reshaping {k} for SD format""" )
                new_state_dict[k] = reshape_weight_for_sd(v )
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
('resblocks.', 'text_model.encoder.layers.'),
('ln_1', 'layer_norm1'),
('ln_2', 'layer_norm2'),
('.c_fc.', '.fc1.'),
('.c_proj.', '.fc2.'),
('.attn', '.self_attn'),
('ln_final.', 'transformer.text_model.final_layer_norm.'),
('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'),
('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile('''|'''.join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {'q': 0, 'k': 1, 'v': 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith('''.self_attn.q_proj.weight''' )
            or k.endswith('''.self_attn.k_proj.weight''' )
            or k.endswith('''.self_attn.v_proj.weight''' )
        ):
            k_pre = k[: -len('''.q_proj.weight''' )]
            k_code = k[-len('''q_proj.weight''' )]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue
        if (
            k.endswith('''.self_attn.q_proj.bias''' )
            or k.endswith('''.self_attn.k_proj.bias''' )
            or k.endswith('''.self_attn.v_proj.bias''' )
        ):
            k_pre = k[: -len('''.q_proj.bias''' )]
            k_code = k[-len('''q_proj.bias''' )]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue
        relabelled_key = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k )
        new_state_dict[relabelled_key] = v
    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception('''CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing''' )
        relabelled_key = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k_pre )
        new_state_dict[relabelled_key + '''.in_proj_weight'''] = torch.cat(tensors )
    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception('''CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing''' )
        relabelled_key = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k_pre )
        new_state_dict[relabelled_key + '''.in_proj_bias'''] = torch.cat(tensors )
    return new_state_dict
def convert_text_enc_state_dict(text_enc_dict):
    return text_enc_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
    parser.add_argument(
        '''--use_safetensors''', action='''store_true''', help='''Save weights use safetensors, default is ckpt.'''
    )
    args = parser.parse_args()
    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
    # Path for safetensors
    unet_path = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.safetensors''')
    vae_path = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.safetensors''')
    text_enc_path = osp.join(args.model_path, '''text_encoder''', '''model.safetensors''')
    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device='''cpu''')
    else:
        unet_path = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.bin''')
        unet_state_dict = torch.load(unet_path, map_location='''cpu''')
    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device='''cpu''')
    else:
        vae_path = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.bin''')
        vae_state_dict = torch.load(vae_path, map_location='''cpu''')
    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device='''cpu''')
    else:
        text_enc_path = osp.join(args.model_path, '''text_encoder''', '''pytorch_model.bin''')
        text_enc_dict = torch.load(text_enc_path, map_location='''cpu''')
    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {'''model.diffusion_model.''' + k: v for k, v in unet_state_dict.items()}
    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {'''first_stage_model.''' + k: v for k, v in vae_state_dict.items()}
    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_v20_model = '''text_model.encoder.layers.22.layer_norm2.bias''' in text_enc_dict
    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {'''transformer.''' + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {'''cond_stage_model.model.''' + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {'''cond_stage_model.transformer.''' + k: v for k, v in text_enc_dict.items()}
    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}
    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {'''state_dict''': state_dict}
        torch.save(state_dict, args.checkpoint_path)
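    # Example invocation (flags as defined by the argparse setup above; the
    # script filename is assumed here):
    #   python convert_diffusers_to_original_stable_diffusion.py \
    #       --model_path ./my-diffusers-model --checkpoint_path ./model.ckpt --half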
| 661 |
from __future__ import annotations
from collections.abc import Iterator
class Node:
    '''simple docstring'''
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None
class BinaryTreeNodeSum:
    '''simple docstring'''
    def __init__(self, tree: Node) -> None:
        self.tree = tree
    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left ) + self.depth_first_search(node.right )
        )
    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod() | 31 | 0 |
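# A short usage sketch: a three-node tree whose values sum to 10 + 5 - 3 = 12.
if __name__ == "__main__":
    tree = Node(10)
    tree.left = Node(5)
    tree.right = Node(-3)
    print(next(iter(BinaryTreeNodeSum(tree))))  # 12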
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_mmbt': ['MMBTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mmbt'] = ['MMBTForClassification', 'MMBTModel', 'ModalEmbeddings']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 77 |
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    # default the right bound to the last index on the first call
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        # shrink the window from both ends and recurse
        return search(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
import doctest
doctest.testmod() | 31 | 0 |
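# A short usage sketch (the key 5 sits at index 4; a missing key gives -1):
if __name__ == "__main__":
    print(search([1, 2, 3, 4, 5], 5))   # 4
    print(search([1, 2, 3, 4, 5], 6))   # -1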
import os
def largest_product(grid) -> int:
    """Find the greatest product of four adjacent numbers in any direction."""
    n_columns = len(grid[0] )
    n_rows = len(grid )
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0
    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns ):
        for j in range(n_rows - 3 ):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )
            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )
            max_product = max(
                vert_product , horz_product , lr_diag_product , rl_diag_product )
            if max_product > largest:
                largest = max_product
    return largest
def solution() -> int:
    """Read the 20x20 grid from grid.txt next to this file and solve it."""
    grid = []
    with open(os.path.dirname(__file__ ) + '''/grid.txt''' ) as file:
        for line in file:
            grid.append(line.strip('''\n''' ).split(''' ''' ) )
    grid = [[int(i ) for i in grid[j]] for j in range(len(grid ) )]
    return largest_product(grid )
if __name__ == "__main__":
print(solution())
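# A small self-check on an in-memory 4x4 grid: the best run of four is the
# bottom row, 13 * 14 * 15 * 16 == 43680.
if __name__ == "__main__":
    demo_grid = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
    assert largest_product(demo_grid) == 43_680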
| 202 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
lowerCamelCase__ : Optional[Any] = None
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
lowerCamelCase__ : List[str] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
lowerCamelCase__ : List[str] = {
'vocab_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model',
},
'tokenizer_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json',
},
}
lowerCamelCase__ : Optional[Any] = {
'google/fnet-base': 512,
'google/fnet-large': 512,
}
lowerCamelCase__ : List[Any] = '▁'
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = ["input_ids", "token_type_ids"]
lowercase_ = FNetTokenizer
def __init__( self : List[Any] , _lowerCAmelCase : Dict=None , _lowerCAmelCase : Dict=None , _lowerCAmelCase : List[str]=False , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : List[Any]="<unk>" , _lowerCAmelCase : Optional[Any]="[SEP]" , _lowerCAmelCase : Optional[Any]="<pad>" , _lowerCAmelCase : Optional[int]="[CLS]" , _lowerCAmelCase : Optional[Any]="[MASK]" , **_lowerCAmelCase : Any , ):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
SCREAMING_SNAKE_CASE_ = (
AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase , normalized=_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase )
else mask_token
)
super().__init__(
_lowerCAmelCase , tokenizer_file=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , remove_space=_lowerCAmelCase , keep_accents=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , **_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = do_lower_case
SCREAMING_SNAKE_CASE_ = remove_space
SCREAMING_SNAKE_CASE_ = keep_accents
SCREAMING_SNAKE_CASE_ = vocab_file
SCREAMING_SNAKE_CASE_ = False if not self.vocab_file else True
def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCAmelCase_ ( self : Optional[int] , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ):
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE_ = os.path.join(
_lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ):
copyfile(self.vocab_file , _lowerCAmelCase )
return (out_vocab_file,) | 31 | 0 |
'''simple docstring'''
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # prune the walk in place: skip the scripts dir and hidden/private dirs
        dir_names[:] = [d for d in dir_names if d != """scripts""" and d[0] not in """._"""]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("""./""")
def md_prefix(i):
    return f'''{i * ' '}*''' if i else "\n##"
def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f'''{md_prefix(i)} {new_part.replace('_', ' ').title()}''')
    return new_path
def print_directory_md(top_dir: str = ".") -> None:
    old_path = """"""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f'''{filepath}/{filename}'''.replace(""" """, """%20""")
        filename = os.path.splitext(filename.replace("""_""", """ """).title())[0]
        print(f'''{md_prefix(indent)} [{filename}]({url})''')
if __name__ == "__main__":
print_directory_md(".")
| 325 |
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    # incremental sieve: map each known composite to one of its prime factors
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # prime is composite; slide its factor to the next unmarked multiple
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # prime really is prime; mark its square as the first composite
            factor_map[prime * prime] = prime
            yield prime
        prime += 1
def solution(limit: float = 1E10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the reminder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution()) | 31 | 0 |
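# Quick sanity check of the generator above: the first five primes.
if __name__ == "__main__":
    from itertools import islice

    assert list(islice(sieve(), 5)) == [2, 3, 5, 7, 11]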
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
__a = logging.get_logger(__name__)
__a = {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json',
'allenai/longformer-large-4096': 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json',
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'
),
}
class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30_522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3_072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig):
    def __init__(
        self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None
    ) -> None:
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        preprocessor: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1
return inputs | 30 |
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


# Class name assumed from accelerate's test utilities (the fixed [2, 3] parameters
# match its XPU variant of the regression model).
class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b
# Function name assumed from accelerate's example tests.
def mocked_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["sentence1", "sentence2", "label"]
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)
return train_dataloader, eval_dataloader | 31 | 0 |
'''simple docstring'''
__all__ = [
'Audio',
'Array2D',
'Array3D',
'Array4D',
'Array5D',
'ClassLabel',
'Features',
'Sequence',
'Value',
'Image',
'Translation',
'TranslationVariableLanguages',
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 270 |
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor

logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs) | 31 | 0 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
# Test method names assumed from accelerate's metric test suite.
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
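# A hedged usage sketch (assuming accelerate's public debug_launcher API, used by
# the CPU tests above): debug_launcher runs a function in spawned processes without
# any GPU, e.g.
#     debug_launcher(test_metrics.main, num_processes=2)
# which emulates a 2-process distributed run entirely on CPU.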
| 591 |
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    # Validate that the rows and the columns of the grid are sorted in decreasing order.
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
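# Worked example for find_negative_index (values traced by hand): for
# array = [4, 3, 2, -1] we get left=0, right=3; mid=1 (3 >= 0) moves left to 2;
# mid=2 (2 >= 0) moves left to 3; mid=3 finds -1 with array[2] >= 0 and returns 3,
# the index of the first negative value, so the row holds 4 - 3 = 1 negative.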
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark() | 31 | 0 |
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '\n    Examples:\n    ```py\n    >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n    >>> import torch\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n    >>> pipe_prior.to("cuda")\n    >>> prompt = "red cat, 4k photo"\n    >>> out = pipe_prior(prompt)\n    >>> image_emb = out.image_embeds\n    >>> zero_image_emb = out.negative_image_embeds\n    >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n    >>> pipe.to("cuda")\n    >>> image = pipe(\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     height=768,\n    ...     width=768,\n    ...     num_inference_steps=50,\n    ... ).images\n    >>> image[0].save("cat.png")\n    ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
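# Worked example (arithmetic checked by hand): height = width = 512 with
# scale_factor = 8 gives 512 // 64 = 8 exactly, so the function returns
# (8 * 8, 8 * 8) = (64, 64); a 768 request gives 768 // 64 = 12 -> 96. Sizes that
# are not multiples of 64 are rounded up to the next aligned value.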
class KandinskyV22Pipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel) -> None:
        super().__init__()

        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
"""simple docstring"""
UpperCamelCase_ : List[str] = self._execution_device
UpperCamelCase_ : str = guidance_scale > 1.0
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
UpperCamelCase_ : Dict = torch.cat(_lowerCAmelCase , dim=0 )
UpperCamelCase_ : List[str] = image_embeds.shape[0] * num_images_per_prompt
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
UpperCamelCase_ : str = torch.cat(_lowerCAmelCase , dim=0 )
if do_classifier_free_guidance:
UpperCamelCase_ : Dict = image_embeds.repeat_interleave(_lowerCAmelCase , dim=0 )
UpperCamelCase_ : Tuple = negative_image_embeds.repeat_interleave(_lowerCAmelCase , dim=0 )
UpperCamelCase_ : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_lowerCAmelCase )
self.scheduler.set_timesteps(_lowerCAmelCase , device=_lowerCAmelCase )
UpperCamelCase_ : Union[str, Any] = self.scheduler.timesteps
UpperCamelCase_ : Any = self.unet.config.in_channels
UpperCamelCase_, UpperCamelCase_ : int = downscale_height_and_width(_lowerCAmelCase , _lowerCAmelCase , self.movq_scale_factor )
# create initial latent
UpperCamelCase_ : Optional[Any] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , self.scheduler , )
for i, t in enumerate(self.progress_bar(_lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
UpperCamelCase_ : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCamelCase_ : Dict = {'image_embeds': image_embeds}
UpperCamelCase_ : Tuple = self.unet(
sample=_lowerCAmelCase , timestep=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , added_cond_kwargs=_lowerCAmelCase , return_dict=_lowerCAmelCase , )[0]
if do_classifier_free_guidance:
UpperCamelCase_, UpperCamelCase_ : Dict = noise_pred.split(latents.shape[1] , dim=1 )
UpperCamelCase_, UpperCamelCase_ : Dict = noise_pred.chunk(2 )
UpperCamelCase_, UpperCamelCase_ : List[str] = variance_pred.chunk(2 )
UpperCamelCase_ : int = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCamelCase_ : Optional[int] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCamelCase_, UpperCamelCase_ : Optional[int] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase_ : List[str] = self.scheduler.step(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase , )[0]
# post-processing
UpperCamelCase_ : Optional[int] = self.movq.decode(_lowerCAmelCase , force_not_quantize=_lowerCAmelCase )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
UpperCamelCase_ : Any = image * 0.5 + 0.5
UpperCamelCase_ : int = image.clamp(0 , 1 )
UpperCamelCase_ : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCamelCase_ : Optional[int] = self.numpy_to_pil(_lowerCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowerCAmelCase )
| 417 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_mmbt': ['MMBTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mmbt'] = ['MMBTForClassification', 'MMBTModel', 'ModalEmbeddings']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 31 | 0 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/config.json',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/config.json',
}
class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
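# A minimal usage sketch (standard transformers API; XLNetModel assumed importable):
#     from transformers import XLNetConfig, XLNetModel
#     config = XLNetConfig(d_model=768, n_head=12)  # 768 % 12 == 0, so the check passes
#     model = XLNetModel(config)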
| 351 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class lowerCamelCase_ :
'''simple docstring'''
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
return self.get_dummy_input()
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'." )
    def get_dummy_input(self, include_temb=True, include_res_hidden_states_tuple=False, include_encoder_hidden_states=False, include_skip_sample=False):
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 32
SCREAMING_SNAKE_CASE_ = (32, 32)
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = torch.device(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = (batch_size, num_channels) + sizes
SCREAMING_SNAKE_CASE_ = randn_tensor(_lowerCAmelCase , generator=_lowerCAmelCase , device=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = {'hidden_states': hidden_states}
if include_temb:
SCREAMING_SNAKE_CASE_ = 128
SCREAMING_SNAKE_CASE_ = randn_tensor((batch_size, temb_channels) , generator=_lowerCAmelCase , device=_lowerCAmelCase )
if include_res_hidden_states_tuple:
SCREAMING_SNAKE_CASE_ = torch.manual_seed(1 )
SCREAMING_SNAKE_CASE_ = (randn_tensor(_lowerCAmelCase , generator=_lowerCAmelCase , device=_lowerCAmelCase ),)
if include_encoder_hidden_states:
SCREAMING_SNAKE_CASE_ = floats_tensor((batch_size, 32, 32) ).to(_lowerCAmelCase )
if include_skip_sample:
SCREAMING_SNAKE_CASE_ = randn_tensor(((batch_size, 3) + sizes) , generator=_lowerCAmelCase , device=_lowerCAmelCase )
return dummy_input
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ = {
'in_channels': 32,
'out_channels': 32,
'temb_channels': 128,
}
if self.block_type == "up":
SCREAMING_SNAKE_CASE_ = 32
if self.block_type == "mid":
init_dict.pop('out_channels' )
SCREAMING_SNAKE_CASE_ = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : Optional[Any] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = self.block_class(**_lowerCAmelCase )
unet_block.to(_lowerCAmelCase )
unet_block.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = unet_block(**_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = output[0]
self.assertEqual(output.shape , self.output_shape )
SCREAMING_SNAKE_CASE_ = output[0, -1, -3:, -3:]
SCREAMING_SNAKE_CASE_ = torch.tensor(_lowerCAmelCase ).to(_lowerCAmelCase )
assert torch_all_close(output_slice.flatten() , _lowerCAmelCase , atol=5E-3 )
@unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' )
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = self.block_class(**_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.train()
SCREAMING_SNAKE_CASE_ = model(**_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = output[0]
SCREAMING_SNAKE_CASE_ = torch.device(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = randn_tensor(output.shape , device=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.nn.functional.mse_loss(_lowerCAmelCase , _lowerCAmelCase )
loss.backward() | 31 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json',
'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json',
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json',
'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json',
}
class FunnelConfig(PretrainedConfig):
    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=30522,
        block_sizes=[4, 4, 4],
        block_repeats=None,
        num_decoder_layers=2,
        d_model=768,
        n_head=12,
        d_head=64,
        d_inner=3072,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        initializer_range=0.1,
        initializer_std=None,
        layer_norm_eps=1e-9,
        pooling_type="mean",
        attention_type="relative_shift",
        separate_cls=True,
        truncate_seq=True,
        pool_q_only=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only

        super().__init__(**kwargs)

    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`."
        )

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
| 161 |
import operator as op
def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")
            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")
            stack.append(
                str(opr[x](int(a), int(b)))
            )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ")
    return int(stack[0])
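# Worked example (traced by hand): solve(["5", "6", "9", "*", "+"]) pushes 5, 6, 9;
# "*" pops 9 then 6 and pushes 6 * 9 = 54; "+" pops 54 then 5 and pushes
# 5 + 54 = 59, so the function returns 59.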
if __name__ == "__main__":
lowerCamelCase__ : Tuple = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ')
print('\n\tResult = ', solve(Postfix)) | 31 | 0 |
"""simple docstring"""
def is_pangram(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - 97] = True
        elif char.isupper():
            flag[ord(char) - 65] = True
    return all(flag)


def is_pangram_fastest(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26
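# Worked example for the flag-based variant above: "C" sets flag[ord("C") - 65],
# i.e. flag[2], and "c" sets flag[ord("c") - 97], the same slot, so case is
# ignored; all(flag) is True only once every letter a-z has been seen.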
def benchmark() -> None:
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
| 650 |
def sylvester(number: int) -> int:
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
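# First terms, computed by hand from the recurrence a(n) = a(n-1) * (a(n-1) - 1) + 1:
# sylvester(1) = 2, sylvester(2) = 2 * 1 + 1 = 3, sylvester(3) = 3 * 2 + 1 = 7,
# sylvester(4) = 7 * 6 + 1 = 43.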
if __name__ == "__main__":
print(f'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''') | 31 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_graphormer"] = [
'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'GraphormerForGraphClassification',
'GraphormerModel',
'GraphormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 661 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast
_import_structure = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mt5"] = [
'MT5EncoderModel',
'MT5ForConditionalGeneration',
'MT5ForQuestionAnswering',
'MT5Model',
'MT5PreTrainedModel',
'MT5Stack',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mt5"] = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mt5"] = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
    sys.modules[__name__] = _LazyModule(
__name__,
globals()['__file__'],
_import_structure,
extra_objects={'MT5Tokenizer': MTaTokenizer, 'MT5TokenizerFast': MTaTokenizerFast},
module_spec=__spec__,
) | 31 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2000000) -> int:
    """Find the area of the grid whose rectangle count is closest to target."""
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil
return area
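# Why triangle numbers (a sketch of the counting argument): an a x b grid contains
# T(a) * T(b) rectangles, where T(n) = n * (n + 1) / 2, since a rectangle is fixed
# by choosing 2 of the a + 1 vertical and 2 of the b + 1 horizontal grid lines.
# E.g. a 3 x 2 grid gives T(3) * T(2) = 6 * 3 = 18 rectangles; the loop above
# searches for the area whose rectangle count is nearest 2,000,000.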
if __name__ == "__main__":
print(f'''{solution() = }''')
| 77 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@require_torch
def lowerCAmelCase_ ( self : int ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
SCREAMING_SNAKE_CASE_ = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
SCREAMING_SNAKE_CASE_ = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
SCREAMING_SNAKE_CASE_ = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
SCREAMING_SNAKE_CASE_ = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(_lowerCAmelCase )
BertModel.from_pretrained(_lowerCAmelCase )
BertTokenizer.from_pretrained(_lowerCAmelCase )
pipeline(task='fill-mask' , model=_lowerCAmelCase )
# baseline - just load from_pretrained with normal network
SCREAMING_SNAKE_CASE_ = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
SCREAMING_SNAKE_CASE_ = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
SCREAMING_SNAKE_CASE_ = '1'
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def lowerCAmelCase_ ( self : Tuple ):
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
SCREAMING_SNAKE_CASE_ = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
SCREAMING_SNAKE_CASE_ = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
SCREAMING_SNAKE_CASE_ = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
SCREAMING_SNAKE_CASE_ = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(_lowerCAmelCase )
BertModel.from_pretrained(_lowerCAmelCase )
BertTokenizer.from_pretrained(_lowerCAmelCase )
pipeline(task='fill-mask' , model=_lowerCAmelCase )
# baseline - just load from_pretrained with normal network
SCREAMING_SNAKE_CASE_ = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
SCREAMING_SNAKE_CASE_ = self.get_env()
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def lowerCAmelCase_ ( self : List[str] ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
SCREAMING_SNAKE_CASE_ = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
SCREAMING_SNAKE_CASE_ = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
SCREAMING_SNAKE_CASE_ = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
# baseline - just load from_pretrained with normal network
SCREAMING_SNAKE_CASE_ = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
SCREAMING_SNAKE_CASE_ = self.get_env()
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
# next emulate no network
SCREAMING_SNAKE_CASE_ = [sys.executable, '-c', '\n'.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
SCREAMING_SNAKE_CASE_ = '1'
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = '\nfrom transformers import pipeline\n '
SCREAMING_SNAKE_CASE_ = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
SCREAMING_SNAKE_CASE_ = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
SCREAMING_SNAKE_CASE_ = self.get_env()
SCREAMING_SNAKE_CASE_ = '1'
SCREAMING_SNAKE_CASE_ = [sys.executable, '-c', '\n'.join([load, mock, run] )]
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'You cannot infer task automatically within `pipeline` when using offline mode' , result.stderr.decode().replace('\n' , '' ) , )
@require_torch
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ = '\nfrom transformers import AutoModel\n '
SCREAMING_SNAKE_CASE_ = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '
# baseline - just load from_pretrained with normal network
SCREAMING_SNAKE_CASE_ = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
SCREAMING_SNAKE_CASE_ = self.get_env()
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
SCREAMING_SNAKE_CASE_ = '1'
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() ) | 31 | 0 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class a ( _SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ : Any = ['''image_processor''', '''tokenizer''']
SCREAMING_SNAKE_CASE__ : List[str] = '''BlipImageProcessor'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = ('''BertTokenizer''', '''BertTokenizerFast''')
    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length=None, stride: int = 0, pad_to_multiple_of=None, return_attention_mask=None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
"""simple docstring"""
if images is None and text is None:
raise ValueError('''You have to specify either images or text.''' )
# Get only text
if images is None:
__SCREAMING_SNAKE_CASE: Union[str, Any] = self.tokenizer
__SCREAMING_SNAKE_CASE: Any = self.tokenizer(
text=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , stride=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , return_overflowing_tokens=_lowerCAmelCase , return_special_tokens_mask=_lowerCAmelCase , return_offsets_mapping=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , return_length=_lowerCAmelCase , verbose=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase , )
return text_encoding
# add pixel_values
__SCREAMING_SNAKE_CASE: Tuple = self.image_processor(_lowerCAmelCase , return_tensors=_lowerCAmelCase )
if text is not None:
__SCREAMING_SNAKE_CASE: Dict = self.tokenizer(
text=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , stride=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , return_overflowing_tokens=_lowerCAmelCase , return_special_tokens_mask=_lowerCAmelCase , return_offsets_mapping=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , return_length=_lowerCAmelCase , verbose=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase , )
else:
__SCREAMING_SNAKE_CASE: Optional[Any] = None
if text_encoding is not None:
encoding_image_processor.update(_lowerCAmelCase )
return encoding_image_processor
def snake_case_ ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase )
def snake_case_ ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase )
@property
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Dict = self.tokenizer.model_input_names
__SCREAMING_SNAKE_CASE: Optional[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 202 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # Mean-pool the token embeddings, weighting by the attention mask.
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs | 31 | 0 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
@torch.no_grad()
    def __call__(self, audio, sampling_rate=16_000, height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt=None, num_images_per_prompt: int = 1, eta: float = 0.0, generator=None, latents=None, output_type: str = "pil", return_dict: bool = True, callback=None, callback_steps: int = 1, **kwargs):
'''simple docstring'''
        speech_features = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(speech_features, max_length=480_000)
        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}.")
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt", )
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}")
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}.")
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`.")
            else:
                uncond_tokens = negative_prompt
            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device)
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)
        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return image
        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
| 325 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, )
        return model
    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
        return model
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5_006, )
        return RobertaSeriesModelWithTransformation(config)
    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])
                def to(self, device):
                    self.pixel_values.to(device)
                    return self
            return Out()
        return extract
    def test_stable_diffusion_img2img_default_case(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta')
        tokenizer.model_max_length = 77
        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type='np', image=init_image, )
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type='np', image=init_image, return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5E-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5E-3
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta')
        tokenizer.model_max_length = 77
        init_image = self.dummy_image.to(torch_device)
        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt], generator=generator, num_inference_steps=2, output_type='np', image=init_image, ).images
        assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg')
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))
        model_id = 'BAAI/AltDiffusion'
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id, safety_checker=None, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = 'A fantasy landscape, trending on artstation'
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator, output_type='np', )
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]
        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg')
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy')
        model_id = 'BAAI/AltDiffusion'
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id, safety_checker=None, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = 'A fantasy landscape, trending on artstation'
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator, output_type='np', )
        image = output.images[0]
        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1E-2 | 31 | 0 |
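# A minimal, self-contained sketch of the classifier-free guidance combine step
# described in the pipeline comments above. The helper name `apply_cfg` is
# illustrative, not a diffusers API; it assumes the U-Net was run on a doubled
# batch of [unconditional, text-conditioned] inputs.
import torch

def apply_cfg(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # split the doubled batch back into its two halves
    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
    # move past the unconditional prediction toward the text-conditioned one;
    # guidance_scale == 1.0 reduces to the text-conditioned prediction alone
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

print(apply_cfg(torch.randn(2, 4, 64, 64), 7.5).shape)  # torch.Size([1, 4, 64, 64])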
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'tanreinama/GPTSAN-2.8B-spout_is_uniform': (
'https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json'
),
}
class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(self, vocab_size=36_000, max_position_embeddings=1_280, d_model=1_024, d_ff=8_192, d_ext=4_096, d_spout=128, num_switch_layers=10, num_ext_layers=0, num_heads=16, num_experts=16, expert_capacity=128, dropout_rate=0.0, layer_norm_epsilon=1e-5, router_bias=False, router_jitter_noise=0.0, router_dtype="float32", router_ignore_padding_tokens=False, output_hidden_states=False, output_attentions=False, initializer_factor=0.002, output_router_logits=False, use_cache=True, separator_token_id=35_998, pad_token_id=35_995, eos_token_id=35_999, **kwargs, ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache
        super().__init__(
            separator_token_id=separator_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, **kwargs, ) | 30 |
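# A stripped-down sketch of how a config's `attribute_map` (declared above) can
# alias generic names like `hidden_size` onto the stored `d_model`. This is an
# illustration of the mechanism, not the PretrainedConfig source:
class AliasedConfig:
    attribute_map = {"hidden_size": "d_model"}

    def __init__(self, d_model: int):
        self.d_model = d_model

    def __getattr__(self, name):
        # only invoked when normal attribute lookup fails
        if name in self.attribute_map:
            return getattr(self, self.attribute_map[name])
        raise AttributeError(name)

print(AliasedConfig(d_model=1_024).hidden_size)  # 1024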
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json',
'allenai/longformer-large-4096': 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json',
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'
),
}
class LongformerConfig(PretrainedConfig):
    model_type = "longformer"
    def __init__(self, attention_window: Union[List[int], int] = 512, sep_token_id: int = 2, pad_token_id: int = 1, bos_token_id: int = 0, eos_token_id: int = 2, vocab_size: int = 30_522, hidden_size: int = 768, num_hidden_layers: int = 12, num_attention_heads: int = 12, intermediate_size: int = 3_072, hidden_act: str = "gelu", hidden_dropout_prob: float = 0.1, attention_probs_dropout_prob: float = 0.1, max_position_embeddings: int = 512, type_vocab_size: int = 2, initializer_range: float = 0.02, layer_norm_eps: float = 1E-12, onnx_export: bool = False, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True
    @property
    def inputs(self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('global_attention_mask', dynamic_axis),
            ])
    @property
    def outputs(self):
        outputs = super().outputs
        if self.task == "default":
            outputs['pooler_output'] = {0: 'batch'}
        return outputs
    @property
    def atol_for_validation(self):
        return 1E-4
    @property
    def default_onnx_opset(self):
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)
    def generate_dummy_inputs(self, tokenizer: "PreTrainedTokenizerBase", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ):
        inputs = super().generate_dummy_inputs(
            preprocessor=tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        import torch
        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs['global_attention_mask'] = torch.zeros_like(inputs['input_ids'])
        # make every second token global
        inputs['global_attention_mask'][:, ::2] = 1
        return inputs | 31 | 0 |
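# A runnable illustration of the "make every second token global" dummy input
# built by the generator above (the [:, ::2] slice pattern is reconstructed
# from the obscured assignment, so treat the exact layout as an assumption):
import torch

input_ids = torch.zeros(2, 8, dtype=torch.int64)
global_attention_mask = torch.zeros_like(input_ids)
global_attention_mask[:, ::2] = 1
print(global_attention_mask[0].tolist())  # [1, 0, 1, 0, 1, 0, 1, 0]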
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
A_ = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use MobileViTImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs)
| 270 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
lowerCamelCase__ : Optional[int] = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use MobileViTImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs) | 31 | 0 |
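# The snippet above is the standard deprecation shim: keep the old class name
# importable while warning users to migrate. A generic sketch of the pattern
# (class names here are illustrative, not from any real library):
import warnings

class NewProcessor:
    def __init__(self, size: int = 224):
        self.size = size

class OldFeatureExtractor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)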
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores
    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ])
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)
    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))
        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))
    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)
        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
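# A minimal custom criterion in the spirit of those tested above. It assumes
# the (input_ids, scores) -> bool call contract exercised by these tests; in
# newer transformers releases the return type is a tensor, so treat this as a
# sketch for the API version under test:
import torch
from transformers import StoppingCriteria, StoppingCriteriaList

class TokenBudgetCriteria(StoppingCriteria):
    """Stop once the running sequence reaches a fixed token budget."""

    def __init__(self, budget: int):
        self.budget = budget

    def __call__(self, input_ids, scores, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.budget

criteria = StoppingCriteriaList([TokenBudgetCriteria(budget=8)])
print(criteria(torch.ones((1, 8), dtype=torch.long), None))  # True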
| 591 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/swinv2-tiny-patch4-window8-256': (
'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
),
}
class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1E-5, encoder_stride=32, **kwargs, ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0) | 31 | 0 |
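# A quick numeric check of the channel-dimension rule in the config above,
# using its default values:
embed_dim, depths = 96, [2, 2, 6, 2]
print(int(embed_dim * 2 ** (len(depths) - 1)))  # 768, the final stage width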
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained('jplu/tf-camembert-base')
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]], dtype=tf.int32, )  # "J'aime le camembert !" ("I love camembert!")
        output = model(input_ids)['last_hidden_state']
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]], dtype=tf.float32, )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 417 |
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechT5FeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a nested list of random floats with the given 2-D shape."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
class SpeechT5FeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2_000, feature_size=1, padding_value=0.0, sampling_rate=16_000, do_normalize=True, num_mel_bins=80, hop_length=16, win_length=64, win_function="hann_window", fmin=80, fmax=7_600, mel_floor=1E-10, return_attention_mask=True, ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))
        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
class SpeechT5FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechT5FeatureExtractor
    def setUp(self):
        self.feat_extract_tester = SpeechT5FeatureExtractionTester(self)
    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1E-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1E-3))
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors='np').input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors='np').input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1E-3))
        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors='np').input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors='np').input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1E-3))
    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        paddings = ['longest', 'max_length', 'do_not_pad']
        max_lengths = [None, 1_600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors='np')
            input_values = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1E-6)
            self._check_zero_mean_unit_variance(input_values[1][:1_000])
            self.assertTrue(input_values[0][1_000:].sum() < 1E-6)
            self._check_zero_mean_unit_variance(input_values[2][:1_200])
    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1_400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]
        paddings = ['longest', 'max_length', 'do_not_pad']
        max_lengths = [None, 1_600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1_000])
            self._check_zero_mean_unit_variance(input_values[2][:1_200])
    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1_000, padding='max_length', return_tensors='np')
        input_values = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1_000, padding='longest', return_tensors='np')
        input_values = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1_000])
        self._check_zero_mean_unit_variance(input_values[2])
        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1_000))
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2_000, padding='longest', return_tensors='np')
        input_values = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1_000])
        self._check_zero_mean_unit_variance(input_values[2])
        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1_200))
    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'input_values': inputs}], return_tensors='np')
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{'input_values': inputs}], return_tensors='pt')
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def test_call_target(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test feature size
        input_values = feature_extractor(audio_target=speech_inputs, padding=True, return_tensors='np').input_values
        self.assertTrue(input_values.ndim == 3)
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins)
        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors='np').input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors='np').input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1E-3))
        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors='np').input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors='np').input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1E-3))
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors='np').input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors='np').input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1E-3))
    def test_batch_feature_target(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})
        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type='np')
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins))
@require_torch
    def test_batch_feature_target_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type='pt')
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins))
@require_torch
    def test_padding_accepts_tensors_target_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})
        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!
        input_np = feat_extract.pad(processed_features, padding='longest', return_tensors='np')[input_name]
        input_pt = feat_extract.pad(processed_features, padding='longest', return_tensors='pt')[input_name]
        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1E-2)
    def test_attention_mask_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict['return_attention_mask'] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs})
        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!
        processed = feat_extract.pad(processed, padding='longest', return_tensors='np')
        self.assertIn('attention_mask', processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)
    def test_attention_mask_with_truncation_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict['return_attention_mask'] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)
        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!
        processed_pad = feat_extract.pad(
            processed, padding='max_length', max_length=max_length, truncation=True, return_tensors='np')
        self.assertIn('attention_mask', processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length])
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs])
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset
        ds = load_dataset('hf-internal-testing/librispeech_asr_dummy', 'clean', split='validation')
        # automatic decoding with librispeech
        speech_samples = ds.sort('id').select(range(num_samples))[:num_samples]['audio']
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03])
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors='pt').input_values
        self.assertEqual(input_values.shape, (1, 93_680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1E-6))
    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998])
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors='pt').input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1E-4)) | 31 | 0 |
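# The `_check_zero_mean_unit_variance` assertions above verify per-utterance
# normalization. A sketch of that normalization (the epsilon is an assumed
# numerical stabilizer, not the extractor's exact constant):
import numpy as np

def zero_mean_unit_var(x: np.ndarray, eps: float = 1e-7) -> np.ndarray:
    return (x - x.mean()) / np.sqrt(x.var() + eps)

wave = np.random.rand(800).astype(np.float32)
norm = zero_mean_unit_var(wave)
print(abs(norm.mean()) < 1e-3, abs(norm.var() - 1) < 1e-3)  # True True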
def move_tower(height: int, from_pole: str, to_pole: str, with_pole: str) -> None:
    """Recursively move `height` disks from `from_pole` to `to_pole`."""
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)

def move_disk(from_pole: str, to_pole: str) -> None:
    """Print a single disk move."""
    print("moving disk from", from_pole, "to", to_pole)

def main() -> None:
    """Read the tower height and run the solver."""
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")

if __name__ == "__main__":
    main()
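# The recursion above performs 2**height - 1 moves in total; a small check
# that counts moves instead of printing them:
def count_moves(height: int) -> int:
    return 0 if height < 1 else 2 * count_moves(height - 1) + 1

print(count_moves(3))  # 7 == 2**3 - 1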
| 351 |
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int

def all_rotations(s: str) -> list[str]:
    if not isinstance(s, str):
        raise TypeError('The parameter s type must be str.')
    return [s[i:] + s[:i] for i in range(len(s))]

def bwt_transform(s: str) -> BWTTransformDict:
    if not isinstance(s, str):
        raise TypeError('The parameter s type must be str.')
    if not s:
        raise ValueError('The parameter s must not be empty.')
    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response

def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    if not isinstance(bwt_string, str):
        raise TypeError('The parameter bwt_string type must be str.')
    if not bwt_string:
        raise ValueError('The parameter bwt_string must not be empty.')
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            'The parameter idx_original_string type must be int or'
            ' castable to int.')
    if idx_original_string < 0:
        raise ValueError('The parameter idx_original_string must not be lower than 0.')
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            'The parameter idx_original_string must be lower than' ' len(bwt_string).')
    ordered_rotations = [''] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
    entry_msg = 'Provide a string that I will generate its BWT transform: '
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'")
    original_string = reverse_bwt(result['bwt_string'], result['idx_original_string'])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'") | 31 | 0 |
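# A worked round trip through the functions above: the sorted rotations of
# "banana" leave "nnbaaa" in the last column, with the original string at
# sorted index 3 (values checked by hand):
result = bwt_transform('banana')
print(result['bwt_string'])            # nnbaaa
print(result['idx_original_string'])   # 3
print(reverse_bwt(result['bwt_string'], result['idx_original_string']))  # banana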
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 161 |
class Graph:
    """Directed graph stored as an adjacency dict."""
    def __init__(self) -> None:
        self.vertex = {}
    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, ' -> ', ' -> '.join([str(j) for j in self.vertex[i]]))
    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]
    def dfs(self) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)
    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark start vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=' ')
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3 | 31 | 0 |
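# An equivalent iterative traversal for the Graph class above, using an
# explicit stack instead of recursion (a common alternative formulation;
# visit order can differ from the recursive version):
def dfs_iterative(graph: Graph) -> None:
    visited = set()
    for start in graph.vertex:
        stack = [start]
        while stack:
            node = stack.pop()
            if node in visited:
                continue
            visited.add(node)
            print(node, end=' ')
            # schedule this node's neighbours for exploration
            stack.extend(graph.vertex.get(node, []))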
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'spm_file': 'sentencepiece.bpe.model',
}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/s2t-small-librispeech-asr': (
            'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
        ),
    },
    'spm_file': {
        'facebook/s2t-small-librispeech-asr': (
            'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
        )
    },
}
MAX_MODEL_INPUT_SIZES = {
    'facebook/s2t-small-librispeech-asr': 1_024,
}
MUSTC_LANGS = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']
LANGUAGES = {'mustc': MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(self, vocab_file, spm_file, bos_token="<s>", eos_token="</s>", pad_token="<pad>", unk_token="<unk>", do_upper_case=False, do_lower_case=False, tgt_lang=None, lang_codes=None, sp_model_kwargs=None, **kwargs, ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, do_upper_case=do_upper_case, do_lower_case=do_lower_case, tgt_lang=tgt_lang, lang_codes=lang_codes, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)
        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [F'<lang:{lang}>' for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(F'<lang:{lang}>') for lang in self.langs}
            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)
    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang
    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)
    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])
    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens) -> str:
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), F'{save_directory} should be a directory'
        vocab_save_path = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
        )
        spm_save_path = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
        )
        save_json(self.encoder, vocab_save_path)
        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (str(vocab_save_path), str(spm_save_path))
def load_spm( path , sp_model_kwargs ) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json( path ) -> Union[Dict, List]:
    with open(path , 'r' ) as f:
        return json.load(f )
def save_json( data , path ) -> None:
    with open(path , 'w' ) as f:
        json.dump(data , f , indent=2 )
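# Illustrative round-trip for the JSON helpers above (a minimal sketch; the
# temporary path is hypothetical and only for demonstration):
#
#     import tempfile
#     payload = {"<s>": 0, "<pad>": 1}
#     tmp_path = os.path.join(tempfile.mkdtemp() , 'vocab.json' )
#     save_json(payload , tmp_path )
#     assert load_json(tmp_path ) == payload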
| 650 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : str = logging.get_logger(__name__)
lowerCamelCase__ : Tuple = {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json',
'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json',
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json',
'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json',
}
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "funnel"
lowercase_ = {
"hidden_size": "d_model",
"num_attention_heads": "n_head",
}
def __init__( self : int , _lowerCAmelCase : Optional[int]=30_522 , _lowerCAmelCase : List[str]=[4, 4, 4] , _lowerCAmelCase : Tuple=None , _lowerCAmelCase : Optional[int]=2 , _lowerCAmelCase : int=768 , _lowerCAmelCase : Optional[Any]=12 , _lowerCAmelCase : Optional[Any]=64 , _lowerCAmelCase : Optional[Any]=3_072 , _lowerCAmelCase : List[str]="gelu_new" , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : int=0.1 , _lowerCAmelCase : Tuple=0.0 , _lowerCAmelCase : List[Any]=0.1 , _lowerCAmelCase : Dict=None , _lowerCAmelCase : str=1E-9 , _lowerCAmelCase : Any="mean" , _lowerCAmelCase : Union[str, Any]="relative_shift" , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Dict=True , _lowerCAmelCase : Tuple=True , **_lowerCAmelCase : Optional[Any] , ):
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = block_sizes
SCREAMING_SNAKE_CASE_ = [1] * len(_lowerCAmelCase ) if block_repeats is None else block_repeats
assert len(_lowerCAmelCase ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
SCREAMING_SNAKE_CASE_ = num_decoder_layers
SCREAMING_SNAKE_CASE_ = d_model
SCREAMING_SNAKE_CASE_ = n_head
SCREAMING_SNAKE_CASE_ = d_head
SCREAMING_SNAKE_CASE_ = d_inner
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout
SCREAMING_SNAKE_CASE_ = attention_dropout
SCREAMING_SNAKE_CASE_ = activation_dropout
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = initializer_std
SCREAMING_SNAKE_CASE_ = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], F"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
SCREAMING_SNAKE_CASE_ = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], F"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
SCREAMING_SNAKE_CASE_ = attention_type
SCREAMING_SNAKE_CASE_ = separate_cls
SCREAMING_SNAKE_CASE_ = truncate_seq
SCREAMING_SNAKE_CASE_ = pool_q_only
super().__init__(**_lowerCAmelCase )
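    # Derived-size sketch (hedged): assuming this configuration class is exported
    # as ``FunnelConfig``, the layer counts below are computed from the block
    # layout rather than stored directly:
    #
    #     config = FunnelConfig(block_sizes=[4, 4, 4])   # hypothetical usage
    #     config.num_hidden_layers   # -> 12 (sum of block_sizes)
    #     config.num_blocks          # -> 3  (len of block_sizes)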
    @property
    def num_hidden_layers( self ):
        return sum(self.block_sizes )
    @num_hidden_layers.setter
    def num_hidden_layers( self , value ):
        raise NotImplementedError(
            'This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.' )
    @property
    def num_blocks( self ):
        return len(self.block_sizes )
    @num_blocks.setter
    def num_blocks( self , value ):
        raise NotImplementedError('This model does not support the setting of `num_blocks`. Please set `block_sizes`.' ) | 31 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
__SCREAMING_SNAKE_CASE : List[str] = None
__SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Tuple = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__SCREAMING_SNAKE_CASE : List[str] = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'albert-base-v1': 5_1_2,
'albert-large-v1': 5_1_2,
'albert-xlarge-v1': 5_1_2,
'albert-xxlarge-v1': 5_1_2,
'albert-base-v2': 5_1_2,
'albert-large-v2': 5_1_2,
'albert-xlarge-v2': 5_1_2,
'albert-xxlarge-v2': 5_1_2,
}
__SCREAMING_SNAKE_CASE : Any = '▁'
class lowerCamelCase_( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase__ : Any = VOCAB_FILES_NAMES
lowercase__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Dict = AlbertTokenizer
def __init__( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__="[CLS]" , lowerCamelCase__="[SEP]" , lowerCamelCase__="<unk>" , lowerCamelCase__="[SEP]" , lowerCamelCase__="<pad>" , lowerCamelCase__="[CLS]" , lowerCamelCase__="[MASK]" , **lowerCamelCase__ , ):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
_lowerCamelCase = (
AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase , normalized=_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase )
else mask_token
)
super().__init__(
_lowerCAmelCase , tokenizer_file=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , remove_space=_lowerCAmelCase , keep_accents=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , **_lowerCAmelCase , )
_lowerCamelCase = do_lower_case
_lowerCamelCase = remove_space
_lowerCamelCase = keep_accents
_lowerCamelCase = vocab_file
_lowerCamelCase = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
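    # Special-token layout produced by the method above (token strings shown
    # for readability; a and b are the two input sequences):
    #
    #     single: [CLS] a [SEP]
    #     pair:   [CLS] a [SEP] b [SEP]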
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
| 661 |
from __future__ import annotations
from collections.abc import Iterator
class Node:
    '''simple docstring'''
    def __init__( self , value: int ):
        self.value = value
        self.left = None
        self.right = None
class BinaryTreeNodeSum:
    '''simple docstring'''
    def __init__( self , tree: Node ):
        self.tree = tree
    def depth_first_search( self , node: Node | None ):
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self : Dict ):
yield self.depth_first_search(self.tree )
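# Minimal usage sketch for the classes above (names as defined here):
#
#     tree = Node(10)
#     tree.left = Node(5)
#     tree.right = Node(-3)
#     print(next(iter(BinaryTreeNodeSum(tree))))  # -> 12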
if __name__ == "__main__":
import doctest
doctest.testmod() | 31 | 0 |
"""simple docstring"""
class a__ :
def __init__( self : str):
"""simple docstring"""
__UpperCAmelCase : Tuple = {}
def a_ ( self : List[str]):
"""simple docstring"""
print(self.vertex)
for i in self.vertex:
print(_lowerCAmelCase , " -> " , " -> ".join([str(_lowerCAmelCase) for j in self.vertex[i]]))
def a_ ( self : Union[str, Any] , UpperCamelCase_ : int , UpperCamelCase_ : int):
"""simple docstring"""
if from_vertex in self.vertex:
self.vertex[from_vertex].append(_lowerCAmelCase)
else:
# else make a new vertex
__UpperCAmelCase : str = [to_vertex]
def a_ ( self : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : Tuple = [False] * len(self.vertex)
# call the recursive helper function
for i in range(len(self.vertex)):
if not visited[i]:
self.dfs_recursive(_lowerCAmelCase , _lowerCAmelCase)
def a_ ( self : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : list):
"""simple docstring"""
__UpperCAmelCase : Dict = True
print(_lowerCAmelCase , end=" ")
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(_lowerCAmelCase , _lowerCAmelCase)
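# A sketch of an equivalent iterative DFS with an explicit stack; it assumes,
# as in the demo below, that vertex labels are the consecutive integers 0..n-1:
#
#     def dfs_iterative(self):
#         visited = [False] * len(self.vertex)
#         stack = [0]
#         while stack:
#             v = stack.pop()
#             if not visited[v]:
#                 visited[v] = True
#                 print(v, end=" ")
#                 stack.extend(reversed(self.vertex.get(v, [])))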
if __name__ == "__main__":
g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("""DFS:""")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 77 |
def search( list_data: list , key: int , left: int = 0 , right: int = 0 ) -> int:
    right = right or len(list_data ) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data , key , left + 1 , right - 1 )
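# Worked examples for the two-pointer recursive search above (a linear scan,
# so the input does not need to be sorted):
#
#     search([0, 1, 2, 3], 3)   # -> 3
#     search([4, 2, 7], 5)      # -> -1 (not found)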
if __name__ == "__main__":
import doctest
doctest.testmod() | 31 | 0 |
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings( timesteps: jnp.ndarray , embedding_dim: int , freq_shift: float = 1 , min_timescale: float = 1 , max_timescale: float = 1.0E4 , flip_sin_to_cos: bool = False , scale: float = 1.0 , ) -> jnp.ndarray:
    """simple docstring"""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, F"""Embedding dimension {embedding_dim} should be even"""
    num_timescales = float(embedding_dim // 2 )
    log_timescale_increment = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales , dtype=jnp.float32 ) * -log_timescale_increment )
    emb = jnp.expand_dims(timesteps , 1 ) * jnp.expand_dims(inv_timescales , 0 )
    # scale embeddings
    scaled_time = scale * emb
    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time ), jnp.sin(scaled_time )] , axis=1 )
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time ), jnp.cos(scaled_time )] , axis=1 )
    signal = jnp.reshape(signal , [jnp.shape(timesteps )[0], embedding_dim] )
    return signal
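# Quick shape sketch for the helper above (requires jax; values illustrative):
#
#     t = jnp.arange(4)                      # four integer timesteps
#     emb = get_sinusoidal_embeddings(t, 8)  # -> jnp.ndarray of shape (4, 8)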
class FlaxTimestepEmbedding(nn.Module ):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32
    @nn.compact
    def __call__( self , temb ):
        """simple docstring"""
        temb = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_1''' )(temb )
        temb = nn.silu(temb )
        temb = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_2''' )(temb )
        return temb
class FlaxTimesteps(nn.Module ):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1
    @nn.compact
    def __call__( self , timesteps ):
        """simple docstring"""
        return get_sinusoidal_embeddings(
            timesteps , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
| 202 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
lowerCamelCase__ : Optional[Any] = None
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
lowerCamelCase__ : List[str] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
lowerCamelCase__ : List[str] = {
'vocab_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model',
},
'tokenizer_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json',
},
}
lowerCamelCase__ : Optional[Any] = {
'google/fnet-base': 512,
'google/fnet-large': 512,
}
lowerCamelCase__ : List[Any] = '▁'
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = ["input_ids", "token_type_ids"]
lowercase_ = FNetTokenizer
def __init__( self : List[Any] , _lowerCAmelCase : Dict=None , _lowerCAmelCase : Dict=None , _lowerCAmelCase : List[str]=False , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : List[Any]="<unk>" , _lowerCAmelCase : Optional[Any]="[SEP]" , _lowerCAmelCase : Optional[Any]="<pad>" , _lowerCAmelCase : Optional[int]="[CLS]" , _lowerCAmelCase : Optional[Any]="[MASK]" , **_lowerCAmelCase : Any , ):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
SCREAMING_SNAKE_CASE_ = (
AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase , normalized=_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase )
else mask_token
)
super().__init__(
_lowerCAmelCase , tokenizer_file=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , remove_space=_lowerCAmelCase , keep_accents=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , **_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = do_lower_case
SCREAMING_SNAKE_CASE_ = remove_space
SCREAMING_SNAKE_CASE_ = keep_accents
SCREAMING_SNAKE_CASE_ = vocab_file
SCREAMING_SNAKE_CASE_ = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
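    # Segment-id sketch for the method above: zeros cover "[CLS] a [SEP]" and
    # ones cover "b [SEP]", so with len(a)=2 and len(b)=1 the result is
    # [0, 0, 0, 0, 1, 1].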
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,) | 31 | 0 |
'''simple docstring'''
import string
from math import log10
def term_frequency( term , document ):
    document_without_punctuation = document.translate(
        str.maketrans("""""" , """""" , string.punctuation ) ).replace("""\n""" , """""" )
    tokenize_document = document_without_punctuation.split(""" """ ) # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()] )
def document_frequency( term , corpus ):
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("""""" , """""" , string.punctuation ) ) # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("""\n""" )
    term = term.lower()
    return (len([doc for doc in docs if term in doc] ), len(docs ))
def inverse_document_frequency( df , n , smoothing=False ):
    if smoothing:
        if n == 0:
            raise ValueError("""log10(0) is undefined.""" )
        return round(1 + log10(n / (1 + df) ) , 3 )
    if df == 0:
        raise ZeroDivisionError("""df must be > 0""" )
    elif n == 0:
        raise ValueError("""log10(0) is undefined.""" )
    return round(log10(n / df ) , 3 )
def tf_idf( tf , idf ):
    return round(tf * idf , 3 )
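# Worked example for the helpers above (values computed by hand):
#
#     tf = term_frequency("to", "To be, or not to be")                        # -> 2
#     df, n = document_frequency("first", "the first line\nthe second line")  # -> (1, 2)
#     idf = inverse_document_frequency(df, n)   # round(log10(2 / 1), 3) -> 0.301
#     tf_idf(tf, idf)                           # -> 0.602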
| 325 |
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime , None )
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1
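# Quick sanity sketch for the incremental sieve above:
#
#     gen = sieve()
#     [next(gen) for _ in range(5)]   # -> [2, 3, 5, 7, 11]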
def solution( limit: float = 1E10 ) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes )
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes )
        n += 2
if __name__ == "__main__":
    print(solution()) | 31 | 0 |
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
"""simple docstring"""
    def __init__( self ,a=2 ,b=3 ,length=64 ,seed=None ):
        rng = np.random.default_rng(seed )
        self.length = length
        self.x = rng.normal(size=(length,) ).astype(np.float32 )
        self.y = a * self.x + b + rng.normal(scale=0.1 ,size=(length,) ).astype(np.float32 )
def __len__( self ) -> List[str]:
return self.length
    def __getitem__( self ,i ):
        return {"x": self.x[i], "y": self.y[i]}
class RegressionModel4XPU( torch.nn.Module ):
"""simple docstring"""
    def __init__( self ,a=0 ,b=0 ,double_output=False ):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        self.b = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        self.first_batch = True
    def forward( self ,x=None ):
        if self.first_batch:
            print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class RegressionModel( torch.nn.Module ):
"""simple docstring"""
    def __init__( self ,a=0 ,b=0 ,double_output=False ):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a ).float() )
        self.b = torch.nn.Parameter(torch.tensor(b ).float() )
        self.first_batch = True
    def forward( self ,x=None ):
        if self.first_batch:
            print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
            self.first_batch = False
        return x * self.a + self.b
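# Minimal usage sketch for RegressionDataset above (8 samples, batches of 4):
#
#     ds = RegressionDataset(length=8)
#     dl = DataLoader(ds, batch_size=4)
#     for batch in dl:
#         print(batch["x"].shape)   # -> torch.Size([4])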
def get_dataloaders( accelerator , batch_size = 16 ):
'''simple docstring'''
from datasets import load_dataset
from transformers import AutoTokenizer
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained('''bert-base-cased''' )
UpperCAmelCase_ : Tuple = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''}
UpperCAmelCase_ : Tuple = load_dataset('''csv''' , data_files=__UpperCAmelCase )
UpperCAmelCase_ : int = datasets['''train'''].unique('''label''' )
UpperCAmelCase_ : str = {v: i for i, v in enumerate(__UpperCAmelCase )}
    def tokenize_function(examples ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase_ : Tuple = tokenizer(
examples['''sentence1'''] , examples['''sentence2'''] , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , padding='''max_length''' )
if "label" in examples:
UpperCAmelCase_ : Dict = [label_to_id[l] for l in examples['''label''']]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
UpperCAmelCase_ : List[Any] = datasets.map(
__UpperCAmelCase , batched=__UpperCAmelCase , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , )
    def collate_fn(batch ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(batch , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
        return tokenizer.pad(batch , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
UpperCAmelCase_ : List[Any] = DataLoader(tokenized_datasets['''train'''] , shuffle=__UpperCAmelCase , collate_fn=__UpperCAmelCase , batch_size=2 )
UpperCAmelCase_ : Any = DataLoader(tokenized_datasets['''validation'''] , shuffle=__UpperCAmelCase , collate_fn=__UpperCAmelCase , batch_size=1 )
return train_dataloader, eval_dataloader | 30 |
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
'''simple docstring'''
    def __init__( self , a=2 , b=3 , length=64 , seed=None ):
        rng = np.random.default_rng(seed )
        self.length = length
        self.x = rng.normal(size=(length,) ).astype(np.float32 )
        self.y = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.float32 )
def __len__( self : Optional[int] ):
return self.length
    def __getitem__( self , i ):
        return {"x": self.x[i], "y": self.y[i]}
class RegressionModel4XPU( torch.nn.Module ):
'''simple docstring'''
    def __init__( self , a=0 , b=0 , double_output=False ):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        self.b = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        self.first_batch = True
    def forward( self , x=None ):
        if self.first_batch:
            print(F"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}" )
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class RegressionModel( torch.nn.Module ):
'''simple docstring'''
    def __init__( self , a=0 , b=0 , double_output=False ):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a ).float() )
        self.b = torch.nn.Parameter(torch.tensor(b ).float() )
        self.first_batch = True
    def forward( self , x=None ):
        if self.first_batch:
            print(F"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}" )
            self.first_batch = False
        return x * self.a + self.b
def get_dataloaders( accelerator , batch_size = 16 ):
from datasets import load_dataset
from transformers import AutoTokenizer
SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained('bert-base-cased' )
SCREAMING_SNAKE_CASE_ = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'}
SCREAMING_SNAKE_CASE_ = load_dataset('csv' , data_files=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = datasets['train'].unique('label' )
SCREAMING_SNAKE_CASE_ = {v: i for i, v in enumerate(__UpperCAmelCase )}
    def tokenize_function(examples ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE_ = tokenizer(
examples['sentence1'] , examples['sentence2'] , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , padding='max_length' )
if "label" in examples:
SCREAMING_SNAKE_CASE_ = [label_to_id[l] for l in examples['label']]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
SCREAMING_SNAKE_CASE_ = datasets.map(
__UpperCAmelCase , batched=__UpperCAmelCase , remove_columns=['sentence1', 'sentence2', 'label'] , )
    def collate_fn(batch ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(batch , padding='max_length' , max_length=1_28 , return_tensors='pt' )
        return tokenizer.pad(batch , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE_ = DataLoader(tokenized_datasets['train'] , shuffle=__UpperCAmelCase , collate_fn=__UpperCAmelCase , batch_size=2 )
SCREAMING_SNAKE_CASE_ = DataLoader(tokenized_datasets['validation'] , shuffle=__UpperCAmelCase , collate_fn=__UpperCAmelCase , batch_size=1 )
return train_dataloader, eval_dataloader | 31 | 0 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list( shape , scale=1.0 , rng=None , name=None ):
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
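# e.g. floats_list((2, 3)) returns 2 rows of 3 pseudo-random floats in [0, scale).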
@require_torch
class SpeechTaFeatureExtractionTester( unittest.TestCase ):
def __init__( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Optional[Any]=7 , __lowerCamelCase : Union[str, Any]=400 , __lowerCamelCase : Tuple=2000 , __lowerCamelCase : str=1 , __lowerCamelCase : int=0.0 , __lowerCamelCase : Optional[Any]=16000 , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Any=80 , __lowerCamelCase : Union[str, Any]=16 , __lowerCamelCase : List[str]=64 , __lowerCamelCase : List[Any]="hann_window" , __lowerCamelCase : Any=80 , __lowerCamelCase : List[Any]=7600 , __lowerCamelCase : List[Any]=1E-10 , __lowerCamelCase : Optional[Any]=True , ):
snake_case__ : Dict = parent
snake_case__ : Optional[Any] = batch_size
snake_case__ : List[str] = min_seq_length
snake_case__ : Optional[Any] = max_seq_length
snake_case__ : Dict = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
snake_case__ : Any = feature_size
snake_case__ : Union[str, Any] = padding_value
snake_case__ : List[Any] = sampling_rate
snake_case__ : Optional[Any] = do_normalize
snake_case__ : Tuple = num_mel_bins
snake_case__ : List[str] = hop_length
snake_case__ : int = win_length
snake_case__ : Union[str, Any] = win_function
snake_case__ : int = fmin
snake_case__ : List[str] = fmax
snake_case__ : str = mel_floor
snake_case__ : Optional[int] = return_attention_mask
    def prepare_feat_extract_dict( self ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
def _flatten(__lowerCamelCase : Dict ):
return list(itertools.chain(*_lowerCAmelCase ) )
if equal_length:
snake_case__ : Union[str, Any] = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
snake_case__ : Dict = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
snake_case__ : Any = [np.asarray(_lowerCAmelCase ) for x in speech_inputs]
return speech_inputs
    def prepare_inputs_for_target( self , equal_length=False , numpify=False ):
if equal_length:
snake_case__ : Any = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
snake_case__ : int = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
snake_case__ : Union[str, Any] = [np.asarray(_lowerCAmelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
class lowercase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
    feature_extraction_class = SpeechTaFeatureExtractor
    def setUp( self ):
        self.feat_extract_tester = SpeechTaFeatureExtractionTester(self )
def _lowerCAmelCase ( self : Any , __lowerCamelCase : int ):
self.assertTrue(np.all(np.mean(_lowerCAmelCase , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(_lowerCAmelCase , axis=0 ) - 1 ) < 1E-3 ) )
def _lowerCAmelCase ( self : List[Any] ):
# Tests that all call wrap to encode_plus and batch_encode_plus
snake_case__ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
snake_case__ : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
snake_case__ : int = [np.asarray(_lowerCAmelCase ) for speech_input in speech_inputs]
# Test not batched input
snake_case__ : str = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
snake_case__ : Tuple = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
# Test batched
snake_case__ : Union[str, Any] = feat_extract(_lowerCAmelCase , return_tensors='np' ).input_values
snake_case__ : str = feat_extract(_lowerCAmelCase , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
def _lowerCAmelCase ( self : Union[str, Any] ):
snake_case__ : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case__ : List[str] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
snake_case__ : Any = ['longest', 'max_length', 'do_not_pad']
snake_case__ : List[str] = [None, 1600, None]
for max_length, padding in zip(_lowerCAmelCase , _lowerCAmelCase ):
snake_case__ : Optional[int] = feat_extract(_lowerCAmelCase , padding=_lowerCAmelCase , max_length=_lowerCAmelCase , return_tensors='np' )
snake_case__ : Optional[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def _lowerCAmelCase ( self : Optional[Any] ):
snake_case__ : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case__ : Any = range(800 , 1400 , 200 )
snake_case__ : Optional[int] = [floats_list((1, x) )[0] for x in lengths]
snake_case__ : Any = ['longest', 'max_length', 'do_not_pad']
snake_case__ : List[Any] = [None, 1600, None]
for max_length, padding in zip(_lowerCAmelCase , _lowerCAmelCase ):
snake_case__ : int = feat_extract(_lowerCAmelCase , max_length=_lowerCAmelCase , padding=_lowerCAmelCase )
snake_case__ : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def _lowerCAmelCase ( self : Dict ):
snake_case__ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case__ : str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
snake_case__ : Optional[Any] = feat_extract(
_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=1000 , padding='max_length' , return_tensors='np' )
snake_case__ : str = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def _lowerCAmelCase ( self : Dict ):
snake_case__ : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case__ : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
snake_case__ : Dict = feat_extract(
_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=1000 , padding='longest' , return_tensors='np' )
snake_case__ : Tuple = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
snake_case__ : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
snake_case__ : List[Any] = feat_extract(
_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=2000 , padding='longest' , return_tensors='np' )
snake_case__ : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
def _lowerCAmelCase ( self : Optional[int] ):
snake_case__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case__ : str = np.random.rand(100 ).astype(np.floataa )
snake_case__ : Any = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
snake_case__ : Tuple = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
snake_case__ : Optional[int] = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def _lowerCAmelCase ( self : Tuple ):
# Tests that all call wrap to encode_plus and batch_encode_plus
snake_case__ : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
snake_case__ : List[str] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
snake_case__ : Optional[int] = [np.asarray(_lowerCAmelCase ) for speech_input in speech_inputs]
# Test feature size
snake_case__ : List[Any] = feature_extractor(audio_target=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors='np' ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
snake_case__ : str = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_values
snake_case__ : Any = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
# Test batched
snake_case__ : int = feature_extractor(_lowerCAmelCase , return_tensors='np' ).input_values
snake_case__ : int = feature_extractor(_lowerCAmelCase , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
snake_case__ : Optional[int] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
snake_case__ : List[Any] = np.asarray(_lowerCAmelCase )
snake_case__ : List[Any] = feature_extractor(_lowerCAmelCase , return_tensors='np' ).input_values
snake_case__ : str = feature_extractor(_lowerCAmelCase , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
def _lowerCAmelCase ( self : str ):
snake_case__ : Any = self.feat_extract_tester.prepare_inputs_for_target()
snake_case__ : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
snake_case__ : List[str] = feat_extract.model_input_names[0]
snake_case__ : Union[str, Any] = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_lowerCAmelCase ) == len(_lowerCAmelCase ) for x, y in zip(_lowerCAmelCase , processed_features[input_name] ) ) )
snake_case__ : int = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_lowerCAmelCase )
snake_case__ : Any = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
snake_case__ : Any = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
snake_case__ : str = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def _lowerCAmelCase ( self : Dict ):
snake_case__ : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_lowerCAmelCase )
snake_case__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
snake_case__ : int = feat_extract.model_input_names[0]
snake_case__ : Union[str, Any] = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
snake_case__ : Optional[int] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
snake_case__ : Optional[Any] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def _lowerCAmelCase ( self : Dict ):
snake_case__ : str = self.feature_extraction_class(**self.feat_extract_dict )
snake_case__ : List[str] = self.feat_extract_tester.prepare_inputs_for_target()
snake_case__ : Any = feat_extract.model_input_names[0]
snake_case__ : Optional[int] = BatchFeature({input_name: speech_inputs} )
snake_case__ : List[str] = feat_extract.num_mel_bins # hack!
snake_case__ : Dict = feat_extract.pad(_lowerCAmelCase , padding='longest' , return_tensors='np' )[input_name]
snake_case__ : str = feat_extract.pad(_lowerCAmelCase , padding='longest' , return_tensors='pt' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def _lowerCAmelCase ( self : str ):
snake_case__ : Tuple = self.feat_extract_dict
snake_case__ : List[Any] = True
snake_case__ : List[str] = self.feature_extraction_class(**_lowerCAmelCase )
snake_case__ : int = self.feat_extract_tester.prepare_inputs_for_target()
snake_case__ : Union[str, Any] = [len(_lowerCAmelCase ) for x in speech_inputs]
snake_case__ : List[str] = feat_extract.model_input_names[0]
snake_case__ : Optional[Any] = BatchFeature({input_name: speech_inputs} )
snake_case__ : Any = feat_extract.num_mel_bins # hack!
snake_case__ : Any = feat_extract.pad(_lowerCAmelCase , padding='longest' , return_tensors='np' )
self.assertIn('attention_mask' , _lowerCAmelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _lowerCAmelCase )
def _lowerCAmelCase ( self : Any ):
snake_case__ : List[str] = self.feat_extract_dict
snake_case__ : List[str] = True
snake_case__ : Tuple = self.feature_extraction_class(**_lowerCAmelCase )
snake_case__ : Any = self.feat_extract_tester.prepare_inputs_for_target()
snake_case__ : Optional[int] = [len(_lowerCAmelCase ) for x in speech_inputs]
snake_case__ : Optional[int] = feat_extract.model_input_names[0]
snake_case__ : Optional[Any] = BatchFeature({input_name: speech_inputs} )
snake_case__ : Optional[Any] = min(_lowerCAmelCase )
snake_case__ : List[str] = feat_extract.num_mel_bins # hack!
snake_case__ : List[str] = feat_extract.pad(
_lowerCAmelCase , padding='max_length' , max_length=_lowerCAmelCase , truncation=_lowerCAmelCase , return_tensors='np' )
self.assertIn('attention_mask' , _lowerCAmelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
    def _load_datasamples( self , num_samples ):
        from datasets import load_dataset
        ds = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
        # automatic decoding with librispeech
        speech_samples = ds.sort('id' ).select(range(num_samples ) )[:num_samples]['audio']
        return [x["array"] for x in speech_samples]
def _lowerCAmelCase ( self : Any ):
# fmt: off
snake_case__ : str = torch.tensor(
[2.3_804E-03, 2.0_752E-03, 1.9_836E-03, 2.1_057E-03, 1.6_174E-03,
3.0_518E-04, 9.1_553E-05, 3.3_569E-04, 9.7_656E-04, 1.8_311E-03,
2.0_142E-03, 2.1_057E-03, 1.7_395E-03, 4.5_776E-04, -3.9_673E-04,
4.5_776E-04, 1.0_071E-03, 9.1_553E-05, 4.8_828E-04, 1.1_597E-03,
7.3_242E-04, 9.4_604E-04, 1.8_005E-03, 1.8_311E-03, 8.8_501E-04,
4.2_725E-04, 4.8_828E-04, 7.3_242E-04, 1.0_986E-03, 2.1_057E-03] )
# fmt: on
snake_case__ : int = self._load_datasamples(1 )
snake_case__ : str = SpeechTaFeatureExtractor()
snake_case__ : Optional[int] = feature_extractor(_lowerCAmelCase , return_tensors='pt' ).input_values
self.assertEquals(input_values.shape , (1, 93680) )
self.assertTrue(torch.allclose(input_values[0, :30] , _lowerCAmelCase , atol=1E-6 ) )
def _lowerCAmelCase ( self : Optional[int] ):
# fmt: off
snake_case__ : int = torch.tensor(
[-2.6_8_7_0, -3.0_1_0_4, -3.1_3_5_6, -3.5_3_5_2, -3.0_0_4_4, -3.0_3_5_3, -3.4_7_1_9, -3.6_7_7_7,
-3.1_5_2_0, -2.9_4_3_5, -2.6_5_5_3, -2.8_7_9_5, -2.9_9_4_4, -2.5_9_2_1, -3.0_2_7_9, -3.0_3_8_6,
-3.0_8_6_4, -3.1_2_9_1, -3.2_3_5_3, -2.7_4_4_4, -2.6_8_3_1, -2.7_2_8_7, -3.1_7_6_1, -3.1_5_7_1,
-3.2_7_2_6, -3.0_5_8_2, -3.1_0_0_7, -3.4_5_3_3, -3.4_6_9_5, -3.0_9_9_8] )
# fmt: on
snake_case__ : Any = self._load_datasamples(1 )
snake_case__ : str = SpeechTaFeatureExtractor()
snake_case__ : Union[str, Any] = feature_extractor(audio_target=_lowerCAmelCase , return_tensors='pt' ).input_values
self.assertEquals(input_values.shape , (1, 366, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , _lowerCAmelCase , atol=1E-4 ) )
| 270 |
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Dict , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : Any ):
warnings.warn(
'The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use LayoutLMv2ImageProcessor instead.' , _lowerCAmelCase , )
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase ) | 31 | 0 |
'''simple docstring'''
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class _a :
"""simple docstring"""
    feature_extraction_class = None
    def test_feat_extract_to_json_string( self ):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        obj = json.loads(feat_extract.to_json_string() )
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key] , value )
    def test_feat_extract_to_json_file( self ):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , """feat_extract.json""" )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
    def test_feat_extract_from_and_save_pretrained( self ):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
    def test_init_without_params( self ):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract )
| 591 |
def generate_large_matrix() -> list[list[int]]:
    return [list(range(10_00 - i , -10_00 - i , -1 ) ) for i in range(10_00 )]
grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)
def validate_grid( grid: list[list[int]] ) -> None:
    assert all(row == sorted(row , reverse=True ) for row in grid )
    assert all(list(col ) == sorted(col , reverse=True ) for col in zip(*grid ) )
def find_negative_index( array: list[int] ) -> int:
    left = 0
    right = len(array ) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array )
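# Worked examples for find_negative_index above (rows are sorted in
# decreasing order):
#
#     find_negative_index([4, 3, 2, -1])   # -> 3 (first negative at index 3)
#     find_negative_index([-1, -2])        # -> 0 (all values negative)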
def count_negatives_binary_search( grid: list[list[int]] ) -> int:
    total = 0
    bound = len(grid[0] )
    for i in range(len(grid ) ):
        bound = find_negative_index(grid[i][:bound] )
        total += bound
    return (len(grid ) * len(grid[0] )) - total
def count_negatives_brute_force( grid: list[list[int]] ) -> int:
    return len([number for row in grid for number in row if number < 0] )
def count_negatives_brute_force_with_break( grid: list[list[int]] ) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row ):
            if number < 0:
                total += len(row ) - i
                break
    return total
def benchmark() -> None:
    from timeit import timeit
    print('Running benchmarks' )
    setup = (
        'from __main__ import count_negatives_binary_search, '
        'count_negatives_brute_force, count_negatives_brute_force_with_break, grid'
    )
    for func in (
        "count_negatives_binary_search", # took 0.7727 seconds
        "count_negatives_brute_force_with_break", # took 4.6505 seconds
        "count_negatives_brute_force", # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)" , setup=setup , number=5_00 )
        print(f"{func}() took {time:0.4f} seconds" )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    benchmark() | 31 | 0 |
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook( method ):
    """simple docstring"""
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__ ).base_version
    if version.parse(accelerate_version ) < version.parse('''0.17.0''' ):
        return method
    def wrapper(self , *args , **kwargs ):
        if hasattr(self , '''_hf_hook''' ) and hasattr(self._hf_hook , '''pre_forward''' ):
            self._hf_hook.pre_forward(self )
        return method(self , *args , **kwargs )
    return wrapper | 32 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
def __init__( self , _UpperCamelCase , _UpperCamelCase=13 , _UpperCamelCase=7 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=99 , _UpperCamelCase=24 , _UpperCamelCase=2 , _UpperCamelCase=6 , _UpperCamelCase=37 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=512 , _UpperCamelCase=16 , _UpperCamelCase=2 , _UpperCamelCase=0.02 , _UpperCamelCase=3 , _UpperCamelCase=None , _UpperCamelCase=1000 , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = scope
_UpperCAmelCase = range_bbox
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        bbox = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        config = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config( self ):
        return LiltConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , ):
        model = LiltModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_token_classification( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , ):
        model = LiltForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=torch_device
        )
        # shape comparison needs assertEqual; assertTrue would only check truthiness
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3)) | 32 | 1 |
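# ---- Editor's note: usage sketch (not part of the dataset row above; model
# download required, tensor values are the toy inputs from the test itself).
import torch
from transformers import LiltModel

model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
input_ids = torch.tensor([[1, 2]])
bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]])  # one (x1, y1, x2, y2) box per token
with torch.no_grad():
    out = model(input_ids=input_ids, bbox=bbox)
print(out.last_hidden_state.shape)  # torch.Size([1, 2, 768])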
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update: pop entries until `item` is found, then push everything
            # back with the new priority for `item`
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
def consistent_heuristic(P: TPos, goal: TPos) -> float:
    # euclidean distance
    a = np.array(P)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_2(P: TPos, goal: TPos) -> int:
    # integer division by the global time counter t makes this heuristic inconsistent
    return consistent_heuristic(P, goal) // t


def heuristic_1(P: TPos, goal: TPos) -> int:
    # manhattan distance
    return abs(P[0] - goal[0]) + abs(P[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]) -> float:
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans
def do_something(back_pointer: dict, goal: TPos, start: TPos) -> None:
    """Print the grid with the path found, then exit."""
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()

    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()
def valid(p: TPos) -> bool:
    """Return True if p lies inside the n x n grid."""
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def expand_state(s, j, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                if neighbours not in close_list_inad:
                    for var in range(1, n_heuristic):
                        if key(neighbours, var, goal, g_function) <= W2 * key(
                            neighbours, 0, goal, g_function
                        ):
                            open_list[j].put(
                                neighbours, key(neighbours, var, goal, g_function)
                            )
def make_common_ground() -> list:
    """Build the list of blocked cells used by the demo grid below."""
    some_list = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int) -> None:
    """Anchored multi-heuristic A*: one consistent anchor queue plus inadmissible queues."""
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s, i, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s)
                        expand_state(
                            get_s, 0, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer
                        )
                        close_list_anchor.append(get_s)
print('''No path found to goal''' )
print()
for i in range(n - 1 , -1 , -1 ):
        for j in range(n):
if (j, i) in blocks:
print('''#''' , end=''' ''' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('''*''' , end=''' ''' )
else:
print('''-''' , end=''' ''' )
else:
print('''*''' , end=''' ''' )
if (j, i) == (n - 1, n - 1):
print('''<-- End position''' , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic) | 32 |
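# ---- Editor's note: quick check of the PriorityQueue above (arbitrary values).
# A second `put` for an item already in the queue re-prioritises it instead of
# inserting a duplicate:
pq = PriorityQueue()
pq.put((0, 0), 5)
pq.put((1, 1), 3)
pq.put((0, 0), 1)  # update: (0, 0) now outranks (1, 1)
assert pq.top_show() == (0, 0)
assert pq.get() == (1, (0, 0))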
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(self, vocab_size=50277, context_length=1024, hidden_size=4096, num_hidden_layers=32, attention_hidden_size=None, intermediate_size=None, layer_norm_epsilon=1e-5, bos_token_id=0, eos_token_id=0, rescale_every=6, tie_word_embeddings=False, use_cache=True, **kwargs):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        ) | 32 | 1 |
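# ---- Editor's note: usage sketch for the config above. `attribute_map`
# aliases max_position_embeddings onto context_length, so both names address
# the same value:
config = RwkvConfig(context_length=2048)
assert config.max_position_embeddings == 2048
assert config.attention_hidden_size == config.hidden_size  # default fallback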
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
"""help""": (
"""The maximum total sequence length for target text after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
"""help""": (
"""The maximum total sequence length for validation target text after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded. """
"""This argument is also used to override the ``max_length`` param of ``model.generate``, which is used """
"""during ``evaluate`` and ``predict``."""
)
} , )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
"""help""": (
"""The maximum total sequence length for test target text after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split, metrics, output_dir):
    """Log and save metrics for one split (train/val/test)."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)
# Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )
# use task specific params
    use_task_specific_params(model, data_args.task)
# set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams
# set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
    dataset_class = SeqaSeqDataset
# Get datasets
_UpperCAmelCase = (
dataset_class(
SCREAMING_SNAKE_CASE_ , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_train
else None
)
_UpperCAmelCase = (
dataset_class(
SCREAMING_SNAKE_CASE_ , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
_UpperCAmelCase = (
dataset_class(
SCREAMING_SNAKE_CASE_ , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_predict
else None
)
# Initialize our Trainer
_UpperCAmelCase = (
build_compute_metrics_fn(data_args.task , SCREAMING_SNAKE_CASE_ ) if training_args.predict_with_generate else None
)
_UpperCAmelCase = SeqaSeqTrainer(
model=SCREAMING_SNAKE_CASE_ , args=SCREAMING_SNAKE_CASE_ , data_args=SCREAMING_SNAKE_CASE_ , train_dataset=SCREAMING_SNAKE_CASE_ , eval_dataset=SCREAMING_SNAKE_CASE_ , data_collator=SeqaSeqDataCollator(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , )
    all_metrics = {}
# Training
    if training_args.do_train:
        logger.info("*** Train ***")

        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main() | 32 |
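# ---- Editor's note: invocation sketch (script file name and paths are
# assumptions). The script accepts a single JSON file of dataclass fields, as
# handled in the sys.argv branch of main() above:
import json, subprocess, sys

cfg = {
    "model_name_or_path": "t5-small",   # ModelArguments
    "data_dir": "./wmt_en_ro",          # DataTrainingArguments (assumed layout)
    "output_dir": "./t5_finetuned",     # SeqaSeqTrainingArguments
    "do_train": True,
    "do_eval": True,
}
with open("train_config.json", "w") as f:
    json.dump(cfg, f)
subprocess.run([sys.executable, "finetune_trainer.py", "train_config.json"], check=True)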
def binary_and(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod() | 32 | 1 |
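# ---- Editor's note: hand-checked examples for binary_and above.
assert binary_and(25, 32) == "0b000000"   # 011001 & 100000
assert binary_and(37, 50) == "0b100000"   # 100101 & 110010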
import os
from typing import Dict, List, Tuple, TypeVar, Union
UpperCAmelCase_ = TypeVar("T")
UpperCAmelCase_ = Union[List[T], Tuple[T, ...]]
UpperCAmelCase_ = Union[T, List[T], Dict[str, T]]
UpperCAmelCase_ = Union[str, bytes, os.PathLike] | 32 |
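# ---- Editor's note: how the aliases above read in a signature (illustrative
# function, not from the original module).
def load_texts(path: PathLike) -> ListLike[str]:
    with open(path, encoding="utf-8") as f:
        return [line.strip() for line in f]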
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
"tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(self, vocab_size=65024, hidden_size=4544, num_hidden_layers=32, num_attention_heads=71, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, hidden_dropout=0.0, attention_dropout=0.0, num_kv_heads=None, alibi=False, new_decoder_architecture=False, multi_query=True, parallel_attn=True, bias=False, bos_token_id=11, eos_token_id=11, **kwargs):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi | 32 | 1 |
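# ---- Editor's note: usage sketch for the config above (7B-style defaults).
config = FalconConfig()
assert config.rotary                      # rotary embeddings whenever alibi is off
assert config.head_dim == 4544 // 71      # hidden_size / num_attention_heads == 64
assert config.num_kv_heads == config.num_attention_heads  # default fallback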
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3
def UpperCamelCase( self ):
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(prediction_type='''v_prediction''' )
_UpperCAmelCase = scheduler_class(**_UpperCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCAmelCase = sample.to(_UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase )
_UpperCAmelCase = model(_UpperCamelCase , _UpperCamelCase )
_UpperCAmelCase = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase )
_UpperCAmelCase = output.prev_sample
_UpperCAmelCase = torch.sum(torch.abs(_UpperCamelCase ) )
_UpperCAmelCase = torch.mean(torch.abs(_UpperCamelCase ) )
assert abs(result_sum.item() - 0.0002 ) < 1e-2
assert abs(result_mean.item() - 2.2676e-06 ) < 1e-3
def UpperCamelCase( self ):
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**_UpperCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_UpperCAmelCase = sample.to(_UpperCamelCase )
for t in scheduler.timesteps:
_UpperCAmelCase = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase )
_UpperCAmelCase = model(_UpperCamelCase , _UpperCamelCase )
_UpperCAmelCase = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase )
_UpperCAmelCase = output.prev_sample
_UpperCAmelCase = torch.sum(torch.abs(_UpperCamelCase ) )
_UpperCAmelCase = torch.mean(torch.abs(_UpperCamelCase ) )
assert abs(result_sum.item() - 10.0807 ) < 1e-2
assert abs(result_mean.item() - 0.0131 ) < 1e-3
def UpperCamelCase( self ):
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**_UpperCamelCase , use_karras_sigmas=_UpperCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_UpperCAmelCase = sample.to(_UpperCamelCase )
for t in scheduler.timesteps:
_UpperCAmelCase = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase )
_UpperCAmelCase = model(_UpperCamelCase , _UpperCamelCase )
_UpperCAmelCase = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase )
_UpperCAmelCase = output.prev_sample
_UpperCAmelCase = torch.sum(torch.abs(_UpperCamelCase ) )
_UpperCAmelCase = torch.mean(torch.abs(_UpperCamelCase ) )
assert abs(result_sum.item() - 124.52299499511719 ) < 1e-2
assert abs(result_mean.item() - 0.16213932633399963 ) < 1e-3 | 32 |
from math import sqrt
def is_prime(number: int) -> bool:
    """Check primality using 6k ± 1 trial division."""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def solution(nth: int = 10001) -> int:
    """Return the nth prime number (Project Euler problem 7)."""
    count = 0
    number = 1
while count != nth and number < 3:
number += 1
        if is_prime(number):
count += 1
while count != nth:
number += 2
        if is_prime(number):
count += 1
return number
if __name__ == "__main__":
print(f'''{solution() = }''') | 32 | 1 |
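# ---- Editor's note: sanity checks against known small primes.
assert is_prime(11) and not is_prime(12)
assert solution(6) == 13  # 2, 3, 5, 7, 11, 13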
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
"cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
"cl-tohoku/bert-base-japanese-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
),
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        ) | 32 |
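# ---- Editor's note: usage sketch for the config above (default values).
config = BertConfig()
assert config.hidden_size % config.num_attention_heads == 0
head_dim = config.hidden_size // config.num_attention_heads  # 768 / 12 == 64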
def is_automorphic_number(number: int) -> bool:
    """An automorphic number's square ends in the number itself (e.g. 76² = 5776)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod() | 32 | 1 |
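# ---- Editor's note: spot checks for the automorphic test above.
assert is_automorphic_number(76)       # 76**2 == 5776
assert not is_automorphic_number(7)    # 7**2 == 49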
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
def UpperCamelCase( self ):
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2''' , revision='''bf16''' , dtype=jnp.bfloataa , )
_UpperCAmelCase = '''A painting of a squirrel eating a burger'''
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = sd_pipe.prepare_inputs(_UpperCamelCase )
_UpperCAmelCase = replicate(_UpperCamelCase )
_UpperCAmelCase = shard(_UpperCamelCase )
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = jax.random.split(_UpperCamelCase , jax.device_count() )
_UpperCAmelCase = sd_pipe(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , num_inference_steps=25 , jit=_UpperCamelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
_UpperCAmelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_UpperCAmelCase = images[0, 253:256, 253:256, -1]
_UpperCAmelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_UpperCAmelCase = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def UpperCamelCase( self ):
_UpperCAmelCase = '''stabilityai/stable-diffusion-2'''
_UpperCAmelCase , _UpperCAmelCase = FlaxDPMSolverMultistepScheduler.from_pretrained(_UpperCamelCase , subfolder='''scheduler''' )
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
_UpperCamelCase , scheduler=_UpperCamelCase , revision='''bf16''' , dtype=jnp.bfloataa , )
_UpperCAmelCase = scheduler_params
_UpperCAmelCase = '''A painting of a squirrel eating a burger'''
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = sd_pipe.prepare_inputs(_UpperCamelCase )
_UpperCAmelCase = replicate(_UpperCamelCase )
_UpperCAmelCase = shard(_UpperCamelCase )
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = jax.random.split(_UpperCamelCase , jax.device_count() )
_UpperCAmelCase = sd_pipe(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , num_inference_steps=25 , jit=_UpperCamelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
_UpperCAmelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_UpperCAmelCase = images[0, 253:256, 253:256, -1]
_UpperCAmelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_UpperCAmelCase = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 | 32 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"} | 32 | 1 |
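# ---- Editor's note: usage sketch; `column_mapping` tells `datasets` how to
# rename a corpus column ("content" here is an assumed name) onto "text".
template = LanguageModeling(text_column="content")
assert template.column_mapping == {"content": "text"}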
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)
@require_torch
def UpperCamelCase( self ):
_UpperCAmelCase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
_UpperCAmelCase = [0, 57, 3018, 70307, 91, 2]
_UpperCAmelCase = self.tokenizer(
_UpperCamelCase , max_length=len(_UpperCamelCase ) , padding=_UpperCamelCase , truncation=_UpperCamelCase , return_tensors='''pt''' )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
_UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
def UpperCamelCase( self ):
if not self.test_rust_tokenizer:
return
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = '''I was born in 92000, and this is falsé.'''
_UpperCAmelCase = tokenizer.tokenize(_UpperCamelCase )
_UpperCAmelCase = rust_tokenizer.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
_UpperCAmelCase = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
_UpperCAmelCase = rust_tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = tokenizer.encode(_UpperCamelCase )
_UpperCAmelCase = rust_tokenizer.encode(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
@slow
def UpperCamelCase( self ):
# fmt: off
_UpperCAmelCase = {'''input_ids''': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
_UpperCAmelCase = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=_UpperCamelCase , model_name='''moussaKam/mbarthez''' , revision='''c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6''' , sequences=_UpperCamelCase , ) | 32 |
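# ---- Editor's note: minimal use of the tokenizer exercised above (downloads
# the checkpoint named in the test).
from transformers import BarthezTokenizer

tok = BarthezTokenizer.from_pretrained("moussaKam/mbarthez")
enc = tok("A long paragraph for summarization.", return_tensors="pt")
print(enc.input_ids.shape)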
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
}
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"t5-small": 5_12,
"t5-base": 5_12,
"t5-large": 5_12,
"t5-3b": 5_12,
"t5-11b": 5_12,
}
UpperCAmelCase_ = "▁"
class T5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , _UpperCamelCase , _UpperCamelCase="</s>" , _UpperCamelCase="<unk>" , _UpperCamelCase="<pad>" , _UpperCamelCase=100 , _UpperCamelCase=None , _UpperCamelCase = None , _UpperCamelCase=True , **_UpperCamelCase , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
_UpperCAmelCase = [f'''<extra_id_{i}>''' for i in range(_UpperCamelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
_UpperCAmelCase = len(set(filter(lambda _UpperCamelCase : bool('''extra_id''' in str(_UpperCamelCase ) ) , _UpperCamelCase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
if legacy:
logger.warning_once(
f'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'''
''' read the related pull request available at https://github.com/huggingface/transformers/pull/24565''' )
_UpperCAmelCase = legacy
_UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , pad_token=_UpperCamelCase , extra_ids=_UpperCamelCase , additional_special_tokens=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , legacy=_UpperCamelCase , **_UpperCamelCase , )
_UpperCAmelCase = vocab_file
_UpperCAmelCase = extra_ids
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_UpperCamelCase )
@staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f''' {pretrained_model_name_or_path} automatically truncating your input to'''
f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , _UpperCamelCase , )
return max_model_length
@property
def UpperCamelCase( self ):
return self.sp_model.get_piece_size() + self._extra_ids
def UpperCamelCase( self ):
_UpperCAmelCase = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(_UpperCamelCase )) + [1]
return ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1]
def UpperCamelCase( self ):
return list(
set(filter(lambda _UpperCamelCase : bool(re.search(R'''<extra_id_\d+>''' , _UpperCamelCase ) ) is not None , self.additional_special_tokens ) ) )
def UpperCamelCase( self ):
return [self._convert_token_to_id(_UpperCamelCase ) for token in self.get_sentinel_tokens()]
def UpperCamelCase( self , _UpperCamelCase ):
if len(_UpperCamelCase ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
''' eos tokens being added.''' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ):
_UpperCAmelCase = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ):
_UpperCAmelCase = self._add_eos_if_not_present(_UpperCamelCase )
if token_ids_a is None:
return token_ids_a
else:
_UpperCAmelCase = self._add_eos_if_not_present(_UpperCamelCase )
return token_ids_a + token_ids_a
def __getstate__( self ):
_UpperCAmelCase = self.__dict__.copy()
_UpperCAmelCase = None
return state
def __setstate__( self , _UpperCamelCase ):
_UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_UpperCAmelCase = {}
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase( self , _UpperCamelCase , **_UpperCamelCase ):
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
_UpperCAmelCase = SPIECE_UNDERLINE + text.replace(_UpperCamelCase , ''' ''' )
return super().tokenize(_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , **_UpperCamelCase ):
if not self.legacy:
_UpperCAmelCase = text.startswith(_UpperCamelCase )
if is_first:
_UpperCAmelCase = text[1:]
_UpperCAmelCase = self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase )
if not self.legacy and not is_first and not text.startswith(''' ''' ) and tokens[0].startswith(_UpperCamelCase ):
_UpperCAmelCase = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)
    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token
def UpperCamelCase( self , _UpperCamelCase ):
_UpperCAmelCase = []
_UpperCAmelCase = ''''''
_UpperCAmelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_UpperCamelCase ) + token
_UpperCAmelCase = True
_UpperCAmelCase = []
else:
current_sub_tokens.append(_UpperCamelCase )
_UpperCAmelCase = False
out_string += self.sp_model.decode(_UpperCamelCase )
return out_string.strip()
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ):
if not os.path.isdir(_UpperCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_UpperCAmelCase = os.path.join(
_UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCamelCase , '''wb''' ) as fi:
_UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(_UpperCamelCase )
return (out_vocab_file,) | 32 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
"studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class LukeConfig(PretrainedConfig ):
    model_type = """luke"""
    def __init__( self , vocab_size=50267 , entity_vocab_size=500000 , hidden_size=768 , entity_emb_size=256 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , use_entity_aware_attention=True , classifier_dropout=None , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
from __future__ import annotations
def is_9_pandigital(candidate: int ) -> bool:
    """simple docstring"""
    num_str = str(candidate )
    return len(num_str ) == 9 and set(num_str ) == set('''123456789''' )
def solution() -> int | None:
    """simple docstring"""
    for base_num in range(9999 , 4999 , -1 ):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate ):
            return candidate
    for base_num in range(333 , 99 , -1 ):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate ):
            return candidate
return None
if __name__ == "__main__":
    print(f'''{solution() = }''')
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict ) -> tuple:
    """simple docstring"""
    return (data["data"], data["target"])
def xgboost(features: np.ndarray , target: np.ndarray ) -> XGBClassifier:
    """simple docstring"""
    classifier = XGBClassifier()
    classifier.fit(features , target )
    return classifier
def main() -> None:
    """simple docstring"""
    iris = load_iris()
    features, targets = data_handling(iris )
    x_train, x_test, y_train, y_test = train_test_split(
        features , targets , test_size=0.25 )
    names = iris['''target_names''']
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train , y_train )
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier , x_test , y_test , display_labels=names , cmap='''Blues''' , normalize='''true''' , )
plt.title('''Normalized Confusion Matrix - IRIS Dataset''' )
plt.show()
if __name__ == "__main__":
    import doctest
    doctest.testmod(verbose=True)
    main()
import numpy as np
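# ELU activation: f(x) = x for x > 0, alpha * (exp(x) - 1) otherwise (Clevert et al., 2015).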
def exponential_linear_unit(vector: np.ndarray , alpha: float ) -> np.ndarray:
    """simple docstring"""
    return np.where(vector > 0 , vector , (alpha * (np.exp(vector ) - 1)) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float , reactance: float , impedance: float ) -> dict[str, float]:
    """simple docstring"""
    if (resistance, reactance, impedance).count(0 ) != 1:
        raise ValueError('''One and only one argument must be 0''' )
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance , 2 ) - pow(reactance , 2 ) )}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance , 2 ) - pow(resistance , 2 ) )}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance , 2 ) + pow(reactance , 2 ) )}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
MORSE_CODE_DICT = {
"A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
"H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
"O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
"V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
"2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
"8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", "\"": ".-..-.",
"?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
"(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
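# Invert the table once so decoding is a plain dictionary lookup.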
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str ) -> str:
    """simple docstring"""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def decrypt(message: str ) -> str:
    """simple docstring"""
    return "".join(REVERSE_DICT[char] for char in message.split() )
def main() -> None:
    """simple docstring"""
    message = '''Morse code here!'''
    print(message )
    message = encrypt(message )
    print(message )
    message = decrypt(message )
    print(message )
if __name__ == "__main__":
    main()
def binary_insertion_sort(collection: list ) -> list:
    """simple docstring"""
    n = len(collection )
    for i in range(1 , n ):
        val = collection[i]
        low = 0
        high = i - 1
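        # Binary-search the sorted prefix collection[:i] for val's insertion point.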
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
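        # Shift elements one slot to the right, then drop val into the gap at index low.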
        for j in range(i , low , -1 ):
            collection[j] = collection[j - 1]
        collection[low] = val
return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        """callback""",
        """latents""",
        """callback_steps""",
        """output_type""",
        """num_images_per_prompt""",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False
    def get_dummy_components( self ):
        torch.manual_seed(0 )
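        # A deliberately tiny 1D UNet keeps this fast test cheap while still
        # exercising the same block types as the full Dance Diffusion model.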
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=True , use_timestep_embedding=False , time_embedding_type='''fourier''' , mid_block_type='''UNetMidBlock1D''' , down_block_types=('''DownBlock1DNoSkip''', '''DownBlock1D''', '''AttnDownBlock1D''') , up_block_types=('''AttnUpBlock1D''', '''UpBlock1D''', '''UpBlock1DNoSkip''') , )
        scheduler = IPNDMScheduler()
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''batch_size''': 1,
            '''generator''': generator,
            '''num_inference_steps''': 4,
        }
        return inputs
    def test_dance_diffusion( self ):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        output = pipe(**inputs )
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
    @skip_mps
    def test_save_load_local( self ):
        return super().test_save_load_local()
    @skip_mps
    def test_dict_tuple_outputs_equivalent( self ):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
    @skip_mps
    def test_save_load_optional_components( self ):
        return super().test_save_load_optional_components()
    @skip_mps
    def test_attention_slicing_forward_pass( self ):
        return super().test_attention_slicing_forward_pass()
    def test_inference_batch_single_identical( self ):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase ):
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_dance_diffusion( self ):
        device = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
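        # 4.096 s of audio at the checkpoint's 16 kHz rate is 65,536 samples,
        # which should match the UNet's sample_size.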
        output = pipe(generator=generator , num_inference_steps=100 , audio_length_in_s=4.096 )
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
    def test_dance_diffusion_fp16( self ):
        device = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' , torch_dtype=torch.float16 )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        output = pipe(generator=generator , num_inference_steps=100 , audio_length_in_s=4.096 )
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class RwkvConfig(PretrainedConfig ):
    model_type = """rwkv"""
    attribute_map = {"""max_position_embeddings""": """context_length"""}
    def __init__( self , vocab_size=50277 , context_length=1024 , hidden_size=4096 , num_hidden_layers=32 , attention_hidden_size=None , intermediate_size=None , layer_norm_epsilon=1e-5 , bos_token_id=0 , eos_token_id=0 , rescale_every=6 , tie_word_embeddings=False , use_cache=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
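        # When not set explicitly, the attention size defaults to the hidden size
        # and the feed-forward (channel-mix) size to 4x the hidden size.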
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
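# Each auto class below pairs the generic _BaseAutoModelClass machinery with one of the
# lazy config-to-model mappings above; auto_class_update fills in its docstring.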
class FlaxAutoModel(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_MAPPING
FlaxAutoModel = auto_class_update(FlaxAutoModel)
class FlaxAutoModelForPreTraining(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING
FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")
class FlaxAutoModelForCausalLM(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")
class FlaxAutoModelForMaskedLM(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING
FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")
class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)
class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)
class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")
class FlaxAutoModelForTokenClassification(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)
class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")
class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)
class FlaxAutoModelForImageClassification(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)
class FlaxAutoModelForVision2Seq(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")
class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    def __init__( self , parent , batch_size=13 , image_size=64 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , backbone_featmap_shape=[1, 16, 4, 4] , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
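        # e.g. with the default image_size=64: 64 // 32 = 2, so 2 * 2 = 4 patches and seq_length = 5.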
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        backbone_config = {
            '''global_padding''': '''same''',
            '''layer_type''': '''bottleneck''',
            '''depths''': [3, 4, 9],
            '''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
            '''embedding_dynamic_padding''': True,
            '''hidden_sizes''': [4, 8, 16, 32],
            '''num_groups''': 2,
        }
        return ViTHybridConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=backbone_config , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = ViTHybridModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = ViTHybridModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTHybridConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    @unittest.skip(reason='''ViT does not use inputs_embeds''' )
    def test_inputs_embeds( self ):
        pass
    def test_model_common_attributes( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def test_initialization( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            # Skip the check for the backbone
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f'''{name}.{key}''' for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    """simple docstring"""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head( self ):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
@slow
@require_accelerate
    def test_accelerate_inference( self ):
        image_processor = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''' )
        model = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''' , device_map='''auto''' )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' )
        outputs = model(**inputs )
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1 ).item()
        self.assertTrue(model.config.id2label[predicted_class_idx] , '''tabby, tabby cat''' )
import base64
def base64_encode(string: str ) -> bytes:
    """simple docstring"""
    return base64.b64encode(string.encode('''utf-8''' ) )
def base64_decode(encoded: bytes ) -> str:
    """simple docstring"""
    return base64.b64decode(encoded ).decode('''utf-8''' )
if __name__ == "__main__":
    test = "Hello World!"
    encoded = base64_encode(test)
    print(encoded)
    decoded = base64_decode(encoded)
    print(decoded)
import argparse
import os
import re
UpperCAmelCase_ = "src/diffusers"
# Pattern that looks at the indentation in a line.
UpperCAmelCase_ = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
UpperCAmelCase_ = re.compile(r"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
UpperCAmelCase_ = re.compile(r"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
UpperCAmelCase_ = re.compile(r"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
UpperCAmelCase_ = re.compile(r"\[([^\]]+)\]")
def get_indent(line ):
    """simple docstring"""
    search = _re_indent.search(line )
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code , indent_level="" , start_prompt=None , end_prompt=None ):
    """simple docstring"""
    index = 0
    lines = code.split('''\n''' )
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt ):
            index += 1
        blocks = ['''\n'''.join(lines[:index] )]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines ) and (end_prompt is None or not lines[index].startswith(end_prompt )):
        if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
            if len(current_block ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
                current_block.append(lines[index] )
                blocks.append('''\n'''.join(current_block ) )
                if index < len(lines ) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append('''\n'''.join(current_block ) )
                current_block = [lines[index]]
        else:
            current_block.append(lines[index] )
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block ) > 0:
        blocks.append('''\n'''.join(current_block ) )
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines ):
        blocks.append('''\n'''.join(lines[index:] ) )
    return blocks
def ignore_underscore(key ):
    """simple docstring"""
    def _inner(x ):
        return key(x ).lower().replace('''_''' , '''''' )
    return _inner
def sort_objects(objects , key=None ):
    """simple docstring"""
    def noop(x ):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj ).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj )[0].isupper() and not key(obj ).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj )[0].isupper()]
    key1 = ignore_underscore(key )
    return sorted(constants , key=key1 ) + sorted(classes , key=key1 ) + sorted(functions , key=key1 )
def sort_objects_in_import(import_statement ):
    """simple docstring"""
    def _replace(match ):
        imports = match.groups()[0]
        if "," not in imports:
            return F'''[{imports}]'''
        keys = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1] ) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([F'''"{k}"''' for k in sort_objects(keys )] ) + "]"
    lines = import_statement.split('''\n''' )
    if len(lines ) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == '''[''' else 1
        keys_to_sort = [(i, _re_strip_line.search(line ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        sorted_indices = sort_objects(keys_to_sort , key=lambda x : x[1] )
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
    elif len(lines ) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1] ) is not None:
            lines[1] = _re_bracket_content.sub(_replace , lines[1] )
        else:
            keys = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1] ) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1] ) + ''', '''.join([F'''"{k}"''' for k in sort_objects(keys )] )
        return "\n".join(lines )
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace , import_statement )
        return import_statement
def sort_imports(file , check_only=True ):
    """simple docstring"""
    with open(file , '''r''' ) as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1 , len(main_blocks ) - 1 ):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split('''\n''' )
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines ) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines )
            else:
                line_idx += 1
        if line_idx >= len(block_lines ):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = '''\n'''.join(block_lines[line_idx:-1] )
        indent = get_indent(block_lines[1] )
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code , indent_level=indent )
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if '''_import_structure''' in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b ).groups()[0] if pattern.search(b ) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys ) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort , key=lambda x : x[1] )]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks ) ):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i] )
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
                reordered_blocks.append(block )
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = '''\n'''.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
    if code != "\n".join(main_blocks ):
        if check_only:
            return True
        else:
            print(F'''Overwriting {file}.''' )
            with open(file , '''w''' ) as f:
                f.write('''\n'''.join(main_blocks ) )
def sort_imports_in_all_inits(check_only=True ):
    """simple docstring"""
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS ):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root , '''__init__.py''' ) , check_only=check_only )
            if result:
                failures = [os.path.join(root , '''__init__.py''' )]
    if len(failures ) > 0:
        raise ValueError(F'''Would overwrite {len(failures )} files, run `make style`.''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only)
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class __UpperCamelCase(BaseImageProcessor ):
    model_input_names = ["""pixel_values"""]
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BICUBIC , do_center_crop = True , do_rescale = True , rescale_factor = 1 / 255 , crop_size = None , do_normalize = True , image_mean = None , image_std = None , **kwargs , ):
        super().__init__(**kwargs )
        size = size if size is not None else {'''height''': 224, '''width''': 224}
        size = get_size_dict(size )
        crop_size = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
        crop_size = get_size_dict(crop_size , default_to_square=True , param_name='''crop_size''' )
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize( self , image , size , resample = PILImageResampling.BILINEAR , data_format = None , **kwargs , ):
        size = get_size_dict(size )
        if "shortest_edge" in size:
            size = get_resize_output_image_size(image , size=size['''shortest_edge'''] , default_to_square=False )
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size['''height'''], size['''width'''])
        else:
            raise ValueError(f'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''' )
        return resize(image , size=size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image , size , data_format = None , **kwargs , ):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
        return center_crop(image , size=(size['''height'''], size['''width''']) , data_format=data_format , **kwargs )
    def rescale( self , image , scale , data_format = None , **kwargs ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image , mean , std , data_format = None , **kwargs , ):
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='''crop_size''' , default_to_square=True )
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size )
        if not is_batched(images ):
            images = [images]
        if not valid_images(images ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = RobertaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin , unittest.TestCase ):
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self ):
        self.model_tester = FlaxRobertaModelTester(self )
    @slow
    def test_model_from_pretrained( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''roberta-base''' , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
from ..utils import DummyObject, requires_backends
class __UpperCamelCase(metaclass=DummyObject ):
    _backends = ["""torch""", """scipy"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['''torch''', '''scipy'''] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['''torch''', '''scipy'''] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['''torch''', '''scipy'''] )
def remove_digit(num: int ) -> int:
    """simple docstring"""
    if not isinstance(num , int ):
        raise TypeError('''only integers accepted as input''' )
    else:
        num_str = str(abs(num ) )
        num_transpositions = [list(num_str ) for char in range(len(num_str ) )]
        for index in range(len(num_str ) ):
            num_transpositions[index].pop(index )
        return max(
            int(''''''.join(list(transposition ) ) ) for transposition in num_transpositions )
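# Example: remove_digit(152) considers 52, 12 and 15 and returns 52.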
if __name__ == "__main__":
__import__("doctest").testmod() | 32 |
def solution(n: int = 2_000_000 ) -> int:
    """simple docstring"""
    primality_list = [0 for i in range(n + 1 )]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2 , int(n**0.5 ) + 1 ):
        if primality_list[i] == 0:
            for j in range(i * i , n + 1 , i ):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n ):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
    print(f'''{solution() = }''')
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class __UpperCamelCase ( A__ ):
__A : Any = """openai-gpt"""
__A : Optional[int] = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , _UpperCamelCase=40478 , _UpperCamelCase=512 , _UpperCamelCase=768 , _UpperCamelCase=12 , _UpperCamelCase=12 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=1e-5 , _UpperCamelCase=0.02 , _UpperCamelCase="cls_index" , _UpperCamelCase=True , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase=0.1 , **_UpperCamelCase , ):
_UpperCAmelCase = vocab_size
_UpperCAmelCase = n_positions
_UpperCAmelCase = n_embd
_UpperCAmelCase = n_layer
_UpperCAmelCase = n_head
_UpperCAmelCase = afn
_UpperCAmelCase = resid_pdrop
_UpperCAmelCase = embd_pdrop
_UpperCAmelCase = attn_pdrop
_UpperCAmelCase = layer_norm_epsilon
_UpperCAmelCase = initializer_range
_UpperCAmelCase = summary_type
_UpperCAmelCase = summary_use_proj
_UpperCAmelCase = summary_activation
_UpperCAmelCase = summary_first_dropout
_UpperCAmelCase = summary_proj_to_labels
super().__init__(**_UpperCamelCase ) | 32 |
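For context, a short usage sketch of this config class, assuming the standard transformers package with PyTorch installed:

from transformers import OpenAIGPTConfig, OpenAIGPTModel

config = OpenAIGPTConfig(n_layer=6, n_head=8)  # override a couple of defaults
model = OpenAIGPTModel(config)                 # the model is built from the config
print(config.hidden_size)  # attribute_map resolves this alias to n_embd, i.e. 768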
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs) | 32 | 1 |
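A brief sketch of the deprecation shim in action (assumes the class is importable in your environment):

import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    extractor = GLPNFeatureExtractor()     # still yields a working image processor
    print(caught[0].category.__name__)     # FutureWarning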
from numpy import exp, pi, sqrt
def gaussian(x: float, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Evaluate the Gaussian (normal) probability density at `x`."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod() | 32 |
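Two spot checks for the density function (standard normal facts, not from the source):

print(gaussian(0))        # peak of the standard normal: 1/sqrt(2*pi) ~= 0.3989422804014327
print(gaussian(2, mu=2))  # same peak value, curve centered at mu=2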
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(self, images, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs) -> BatchEncoding:
        encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs)
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) | 32 | 1 |
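A typical call pattern for this processor might look like the following; the checkpoint id and image URL are illustrative placeholders, not taken from the source:

import requests
from PIL import Image
from transformers import BridgeTowerProcessor

processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(image, "two cats lying on a couch", return_tensors="pt")
print(inputs.keys())  # tokenizer outputs plus pixel_values / pixel_mask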
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        # The dataclass is frozen, so the updated schema is set via __dict__.
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"} | 32 |
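A brief sketch of aligning the template with a dataset's features (the schema here is my own example):

from datasets import Audio, Features, Value

features = Features({"audio": Audio(sampling_rate=16_000), "transcription": Value("string")})
template = AutomaticSpeechRecognition()
aligned = template.align_with_features(features)
print(aligned.column_mapping)  # {'audio': 'audio', 'transcription': 'transcription'}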
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm"] = [
        "XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMForMultipleChoice",
        "XLMForQuestionAnswering",
        "XLMForQuestionAnsweringSimple",
        "XLMForSequenceClassification",
        "XLMForTokenClassification",
        "XLMModel",
        "XLMPreTrainedModel",
        "XLMWithLMHeadModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm"] = [
        "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMForMultipleChoice",
        "TFXLMForQuestionAnsweringSimple",
        "TFXLMForSequenceClassification",
        "TFXLMForTokenClassification",
        "TFXLMMainLayer",
        "TFXLMModel",
        "TFXLMPreTrainedModel",
        "TFXLMWithLMHeadModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 32 | 1 |
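For reference, the effect of the lazy-module pattern above is that importing the package stays cheap and a heavy submodule is only imported on first attribute access; a sketch using the top-level transformers package, which relies on the same mechanism:

import transformers                  # fast: model code is not imported yet

config_cls = transformers.XLMConfig  # first access triggers the real import
config = config_cls()                # a default XLM configuration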