"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class __A ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
@staticmethod
@abstractmethod
def SCREAMING_SNAKE_CASE ( __A ) -> int:
raise NotImplementedError()
@abstractmethod
def SCREAMING_SNAKE_CASE ( self ) -> str:
raise NotImplementedError()
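# Illustrative sketch (not part of the original file): a minimal, hypothetical `echo`
# subcommand built on the base class above. As in transformers-cli itself, `parser` is
# the subparsers action returned by `ArgumentParser.add_subparsers()`.
class EchoCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser) -> None:
        echo_parser = parser.add_parser("echo", help="print the given text back")
        echo_parser.add_argument("text", type=str, help="text to print")
        echo_parser.set_defaults(func=lambda args: EchoCommand(args.text))

    def __init__(self, text: str):
        self.text = text

    def run(self) -> None:
        print(self.text)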
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
a = TypeVar('''T''')
class lowercase_ ( Generic[T] ):
'''simple docstring'''
def __init__( self : Any , _UpperCAmelCase : T ):
_A = data
_A = None
def __str__( self : str ):
return F'''{self.data}'''
class lowercase_ ( Generic[T] ):
'''simple docstring'''
def __init__( self : Tuple ):
_A = None
def __iter__( self : List[Any] ):
_A = self.top
while node:
yield node.data
_A = node.next
def __str__( self : Union[str, Any] ):
return "->".join([str(_UpperCAmelCase ) for item in self] )
def __len__( self : List[Any] ):
return len(tuple(iter(self ) ) )
def lowerCAmelCase_ ( self : str ):
return self.top is None
def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : T ):
_A = Node(_UpperCAmelCase )
if not self.is_empty():
_A = self.top
_A = node
def lowerCAmelCase_ ( self : Dict ):
if self.is_empty():
raise IndexError('pop from empty stack' )
assert isinstance(self.top , _UpperCAmelCase )
_A = self.top
_A = self.top.next
return pop_node.data
def lowerCAmelCase_ ( self : Tuple ):
if self.is_empty():
raise IndexError('peek from empty stack' )
assert self.top is not None
return self.top.data
def lowerCAmelCase_ ( self : Optional[Any] ):
_A = None
if __name__ == "__main__":
from doctest import testmod
testmod()
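# Illustrative usage sketch (not part of the original module): exercises the
# linked-list stack above; each operation only relinks the head node.
def _demo_linked_stack() -> None:
    stack = LinkedStack[int]()
    for value in (1, 2, 3):
        stack.push(value)
    assert str(stack) == "3->2->1"
    assert stack.peek() == 3
    assert stack.pop() == 3
    assert len(stack) == 2
    stack.clear()
    assert stack.is_empty()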
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs


def output_types(outputs: List):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types


@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
a = logging.get_logger(__name__)
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Any , *_UpperCAmelCase : List[str] , **_UpperCAmelCase : Union[str, Any] ):
warnings.warn(
'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use ImageGPTImageProcessor instead.' , _UpperCAmelCase , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPT2TokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    @require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = GPT2TokenizerFast.from_pretrained("gpt2")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = GPT2TokenizerFast.from_pretrained("gpt2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_one_file(self):
        # This test is for deprecated behavior and can be removed in v5
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, "wb") as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f)

            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)

        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json"):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json", "wb") as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json")

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model")


@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-tokenizer")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="test-tokenizer")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, use_auth_token=self._token
            )

        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    @require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))

            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast")
        tokenizer = AutoTokenizer.from_pretrained(
            f"{USER}/test-dynamic-tokenizer", use_fast=False, trust_remote_code=True
        )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")


class TrieTest(unittest.TestCase):
    def test_trie(self):
        trie = Trie()
        trie.add("Hello 友達")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
        trie.add("Hello")
        trie.data
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})

    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
        trie.add("[CLS]")
        trie.add("extra_id_1")
        trie.add("extra_id_100")
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])

    def test_trie_single(self):
        trie = Trie()
        trie.add("A")
        self.assertEqual(trie.split("ABC"), ["A", "BC"])
        self.assertEqual(trie.split("BCA"), ["BC", "A"])

    def test_trie_final(self):
        trie = Trie()
        trie.add("TOKEN]")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_subtokens(self):
        trie = Trie()
        trie.add("A")
        trie.add("P")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add("AB")
        trie.add("B")
        trie.add("C")
        self.assertEqual(trie.split("ABC"), ["AB", "C"])

    def test_trie_skip(self):
        trie = Trie()
        trie.add("ABC")
        trie.add("B")
        trie.add("CD")
        self.assertEqual(trie.split("ABCD"), ["ABC", "D"])

    def test_cut_text_hardening(self):
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"])
"""simple docstring"""
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def _snake_case ( _snake_case : str ) -> str:
'''simple docstring'''
return "".join(sorted(_snake_case ) )
def _snake_case ( _snake_case : str ) -> list[str]:
'''simple docstring'''
return word_by_signature[signature(_snake_case )]
a = Path(__file__).parent.joinpath('''words.txt''').read_text(encoding='''utf-8''')
a = sorted({word.strip().lower() for word in data.splitlines()})
a = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
a = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('''anagrams.txt''', '''w''') as file:
file.write('''all_anagrams = \n ''')
file.write(pprint.pformat(all_anagrams))
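# Illustrative sketch (not part of the original script): the same grouping idea on a
# small in-memory word list, so it can run without a words.txt file on disk.
def _demo_group_anagrams(words: list[str]) -> dict[str, list[str]]:
    groups: dict[str, list[str]] = collections.defaultdict(list)
    for w in words:
        groups[signature(w)].append(w)
    # keep only signatures shared by at least two words
    return {sig: ws for sig, ws in groups.items() if len(ws) > 1}


# _demo_group_anagrams(["dog", "god", "cat", "act", "bird"])
# -> {"dgo": ["dog", "god"], "act": ["cat", "act"]}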
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :str = "levit"
def __init__( self , __A=224 , __A=3 , __A=3 , __A=2 , __A=1 , __A=16 , __A=[128, 256, 384] , __A=[4, 8, 12] , __A=[4, 4, 4] , __A=[16, 16, 16] , __A=0 , __A=[2, 2, 2] , __A=[2, 2, 2] , __A=0.0_2 , **__A , ) -> Any:
super().__init__(**__A )
lowerCAmelCase_ :Tuple = image_size
lowerCAmelCase_ :Optional[int] = num_channels
lowerCAmelCase_ :Union[str, Any] = kernel_size
lowerCAmelCase_ :Optional[Any] = stride
lowerCAmelCase_ :Optional[int] = padding
lowerCAmelCase_ :Optional[Any] = hidden_sizes
lowerCAmelCase_ :Optional[int] = num_attention_heads
lowerCAmelCase_ :int = depths
lowerCAmelCase_ :List[str] = key_dim
lowerCAmelCase_ :str = drop_path_rate
lowerCAmelCase_ :Optional[int] = patch_size
lowerCAmelCase_ :Union[str, Any] = attention_ratio
lowerCAmelCase_ :Dict = mlp_ratio
lowerCAmelCase_ :Any = initializer_range
lowerCAmelCase_ :Optional[int] = [
["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Tuple = version.parse("1.11" )
@property
def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __lowerCAmelCase ( self ) -> float:
return 1E-4
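# Illustrative usage sketch (not part of the original module); with the defaults above
# the config mirrors the levit-128S checkpoint:
#
#   config = LevitConfig()
#   config.hidden_sizes   # -> [128, 256, 384]
#   config.down_ops[0]    # -> ["Subsample", 16, 8, 4, 2, 2]  (128 // 16 == 8)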
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we are going to pre-train."""

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )


@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )


def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Load pretrained model and image processor
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # adapt config
    config.update(
        {
            "mask_ratio": model_args.mask_ratio,
            "norm_pix_loss": model_args.norm_pix_loss,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    def preprocess_images(examples):
        """Preprocess a batch of images by applying the transforms."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
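# Illustrative invocation (not part of the original script); it roughly mirrors the
# image-pretraining README example, and every flag maps to a dataclass field above:
#
#   python run_mae.py \
#       --dataset_name cifar10 \
#       --output_dir ./vit-mae-demo \
#       --remove_unused_columns False \
#       --label_names pixel_values \
#       --mask_ratio 0.75 \
#       --do_train \
#       --do_eval \
#       --base_learning_rate 1.5e-4 \
#       --overwrite_output_dir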
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING

    @require_torch
    def test_small_model_pt(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                        " oscope. FiliFili@@"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"])
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                            " oscope. FiliFili@@"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
                            " oscope. oscope. FiliFili@@"
                        )
                    }
                ],
            ],
        )

        outputs = text_generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(list)},
                {"generated_token_ids": ANY(list)},
            ],
        )

        # When the tokenizer cannot pad, batching requires setting a pad token.
        text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
        text_generator.tokenizer.pad_token = "<pad>"
        outputs = text_generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf")

        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                        " please,"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"], do_sample=False)
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                            " please,"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
                            " Cannes 閲閲Cannes Cannes Cannes 攵 please,"
                        )
                    }
                ],
            ],
        )

    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]

    def test_stop_sequence_stopping_criteria(self):
        prompt = "Hello I believe in"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        output = text_generator(prompt)
        self.assertEqual(
            output,
            [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}],
        )

        output = text_generator(prompt, stop_sequence=" fe")
        self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}])

    def run_pipeline_test(self, text_generator, _):
        model = text_generator.model
        tokenizer = text_generator.tokenizer

        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator("This is a test", return_full_text=False)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        text_generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer, return_full_text=False)
        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        outputs = text_generator("This is a test", return_full_text=True)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        if text_generator.tokenizer.pad_token is not None:
            outputs = text_generator(
                ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
            )
            self.assertEqual(
                outputs,
                [
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                ],
            )

        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_text=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_tensors=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_text=True, return_tensors=True)

        # Empty prompt is slightly special
        # it requires BOS token to exist.
        # Special case for Pegasus which will always append EOS so will
        # work even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            outputs = text_generator("")
            self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        else:
            with self.assertRaises((ValueError, AssertionError)):
                outputs = text_generator("")

        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return

        # We don't care about infinite range models.
        # They already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
        if (
            tokenizer.model_max_length < 10_000
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
                text_generator("This is a test" * 500, max_new_tokens=20)

            outputs = text_generator("This is a test" * 500, handle_long_generation="hole", max_new_tokens=20)
            # Hole strategy cannot work
            with self.assertRaises(ValueError):
                text_generator(
                    "This is a test" * 500,
                    handle_long_generation="hole",
                    max_new_tokens=tokenizer.model_max_length + 10,
                )

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_small_model_pt_bloom_accelerate(self):
        import torch

        # Classic `model_kwargs`
        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom",
            model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16},
        )
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.bfloat16)
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto")
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

    @require_torch
    @require_torch_gpu
    def test_small_model_fp16(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device=0, torch_dtype=torch.float16)
        pipe("This is a test")

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_pipeline_accelerate_top_p(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.float16)
        pipe("This is a test", do_sample=True, top_p=0.5)

    def test_pipeline_length_setting_warning(self):
        prompt = "Hello world"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("transformers.generation.tf_utils")
        else:
            logger = logging.get_logger("transformers.generation.utils")
        logger_msg = "Both `max_new_tokens`"  # The beginning of the message to be checked in this test

        # Both are set by the user -> log warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10, max_new_tokens=1)
        self.assertIn(logger_msg, cl.out)

        # The user only sets one -> no warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_new_tokens=1)
        self.assertNotIn(logger_msg, cl.out)

        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10)
        self.assertNotIn(logger_msg, cl.out)
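# Illustrative usage sketch (not part of the original test file): the pipeline behavior
# these tests exercise, on the same tiny checkpoint used above. The tiny random model's
# text output is gibberish; only the output structure is meaningful.
#
#   from transformers import pipeline
#   generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
#   generator("Hello I believe in", do_sample=False, max_new_tokens=5)
#   # -> [{"generated_text": "Hello I believe in ..."}]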
"""simple docstring"""
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names


def add_arguments(parser):
    """Add arguments to parser for functions defined in quant_trainer."""

    group = parser.add_argument_group("quant_trainer arguments")
    group.add_argument("--wprec", type=int, default=8, help="weight precision")
    group.add_argument("--aprec", type=int, default=8, help="activation precision")
    group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling")
    group.add_argument("--quant-disable", action="store_true", help="disable all quantizers")
    group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers")
    group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword")
    group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.")
    group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer")
    group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use")
    group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator")
    group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv")
    group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N")
    group.add_argument(
        "--recalibrate-weights",
        action="store_true",
        help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ),
    )


def set_default_quantizers(args):
    """Set default quantizers before creating the model."""

    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}")

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)


def configure_model(model, args, calib=False, eval=False):
    """Function called before the training loop."""

    logger.info("Configuring Model for Quantization")
    logger.info(f"using quantization package {pytorch_quantization.__file__}")

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)

        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)

        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)

        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)

        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)

        if args.recalibrate_weights:
            recalibrate_weights(model)

        if args.fuse_qkv:
            fuse_qkv(model, args)

        if args.clip_gelu:
            clip_gelu(model, args.clip_gelu)

    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model)


def enable_calibration(model):
    """Enable calibration of all *_quantizer modules in model."""

    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"{name:80}: {module}")


def finish_calibration(model, args):
    """Disable calibration and load amax for all *_quantizer modules in model."""

    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model)


def fuse_qkv(model, args):
    """Adjust quantization ranges to match an implementation where Q, K and V are fused into one GEMM."""

    def fuse3(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()

        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")

    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)


def clip_gelu(model, maxval):
    """Clip activations generated by GELU to maxval when quantized."""

    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")


def expand_amax(model):
    """Expand per-tensor amax to be per channel, where each channel is assigned the per-tensor amax."""

    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}")


def recalibrate_weights(model):
    """Perform max calibration on the weights and update amax."""

    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            if not hasattr(mod._weight_quantizer, "_amax"):
                print(f"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
                continue

            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax


def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Print model quantization configuration."""

    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]

    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        name_width = max(name_width, len(name))

    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        s = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(s) <= line_width:
            logger.info(s)
        else:
            logger.info(f"{name:{name_width}} {act_str}")
            logger.info(f'{" ":{name_width}} {wgt_str}')


def print_quant_summary(model):
    """Print summary of all quantizer modules in the model."""

    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
    print(f"{count} TensorQuantizers found in model")


def set_quantizer(name, mod, quantizer, k, v):
    """Set attributes for mod.quantizer."""

    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f"{name} has no {quantizer}")


def set_quantizers(name, mod, which="both", **kwargs):
    """Set quantizer attributes for mod."""

    s = f"Warning: changing {which} quantizers of {name:{qname_width}}"
    for k, v in kwargs.items():
        s += f" {k}={v}"
        if which in ["input", "both"]:
            set_quantizer(name, mod, "_input_quantizer", k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, "_weight_quantizer", k, v)
    logger.info(s)


def set_quantizer_by_name(model, names, **kwargs):
    """Set quantizer attributes for layers where the name contains a substring in names."""

    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s)
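# Illustrative calibration workflow (not part of the original module), assuming `model`
# is a QDQBERT-style model containing pytorch-quantization TensorQuantizer modules and
# that `args` / `calib_dataloader` come from the surrounding training script:
#
#   set_default_quantizers(args)              # choose calibrators before model creation
#   model = ...                               # build or load the quantized model
#   configure_model(model, args, calib=True)  # quantizer setup for the calibration pass
#   enable_calibration(model)                 # collect activation statistics
#   for batch in calib_dataloader:            # a few forward passes, no backprop needed
#       model(**batch)
#   finish_calibration(model, args)           # load amax values, re-enable quantization
#   configure_model(model, args)              # apply disable/fuse/clip options from args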
"""simple docstring"""
__version__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
T5FilmDecoder,
Transformer2DModel,
UNet1DModel,
UNet2DConditionModel,
UNet2DModel,
UNet3DConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPM2AncestralDiscreteScheduler,
KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImg2ImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImg2ImgPipeline,
IFImg2ImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImg2ImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyV22ControlnetImg2ImgPipeline,
KandinskyV22ControlnetPipeline,
KandinskyV22Img2ImgPipeline,
KandinskyV22InpaintPipeline,
KandinskyV22Pipeline,
KandinskyV22PriorEmb2EmbPipeline,
KandinskyV22PriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImg2ImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImg2ImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepth2ImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImg2ImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPix2PixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDM3DPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
        OnnxStableDiffusionInpaintPipeline,
        OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
        FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
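# A minimal, self-contained sketch of the guarded-import pattern used throughout
# this __init__ (the names mirror the structure above; the real helpers live in
# `diffusers.utils`, so this is an illustration, not the library internals):
import importlib.util


class OptionalDependencyNotAvailable(ImportError):
    """Raised when an optional backend is missing."""


def is_torch_available() -> bool:
    # Probe for the package without actually importing it.
    return importlib.util.find_spec("torch") is not None


try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # The real package would export dummy objects here that raise on use.
    torch = None
else:
    import torch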
| 86 |
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
    p-value (`float`): p-value. **Note**: this is only returned if `return_pvalue=True` is passed.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
_CITATION = r'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    """Spearman rank-order correlation coefficient metric."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
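# Usage sketch (hedged): loading the metric through `datasets` mirrors the
# docstring examples above; the direct scipy call below is equivalent and
# fully self-contained.
from scipy.stats import spearmanr as scipy_spearmanr

rho, pvalue = scipy_spearmanr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
print(round(rho, 2))     # -0.7, matching Example 1 above
print(round(pvalue, 2))  # ~0.19, matching Example 2 above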
| 315 | 0 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional

import torch

from ..utils import add_start_docstrings, logging


logger = logging.get_logger(__name__)


STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be scores for each vocabulary token before
            SoftMax or scores for each vocabulary token after SoftMax.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional stopping criteria specific kwargs.

    Return:
        `bool`. `False` indicates we should continue, `True` indicates we should stop.

"""


class StoppingCriteria(ABC):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
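# Usage sketch (hedged): composing the criteria defined above. The list applies
# OR semantics, so generation stops as soon as any criterion fires.
import torch

criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=10), MaxTimeCriteria(max_time=5.0)])
fake_input_ids = torch.ones((1, 10), dtype=torch.long)
fake_scores = torch.zeros((1, 100))
print(criteria(fake_input_ids, fake_scores))  # True: length 10 >= max_length 10
print(criteria.max_length)                    # 10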
| 87 |
"""simple docstring"""
from collections.abc import Callable


def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find a root of `function` in the interval [a, b] by repeated halving."""
    start = a
    end = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precise to within 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1_000))

    import doctest

    doctest.testmod()
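# Worked example (hedged sketch): bisection halves the bracket each step, so the
# error after k iterations is (b - a) / 2**k; reaching 1e-7 from a unit interval
# takes roughly 24 iterations.
print(round(bisection(lambda x: x * x - 2, 1, 2), 6))  # ~1.414214, i.e. sqrt(2)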
| 315 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 88 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 315 | 0 |
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform: np.array):
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        sampling_rate: Optional[int] = None,
        resample: bool = False,
        mask_audio: bool = False,
        **kwargs,
    ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, 0, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
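# Usage sketch (hedged): feeding one second of synthetic audio through the
# extractor defined above. With the defaults, hop_length = 44100 // 86 = 512,
# so a 1 s clip yields roughly 86 spectrogram frames with 128 mel bins each,
# padded up to a whole number of 16x16 patches.
import numpy as np

extractor = TvltFeatureExtractor()
waveform = np.random.randn(44100).astype(np.float32)  # 1 s of noise at 44.1 kHz
features = extractor(waveform, sampling_rate=44100, return_tensors="np")
print(features["audio_values"].shape)  # (1, 1, padded_time, 128)
print(features["audio_mask"].shape)    # (1, num_patches)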
| 89 |
"""simple docstring"""
from ...utils import (
    OptionalDependencyNotAvailable,
    is_note_seq_available,
    is_torch_available,
    is_transformers_available,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .notes_encoder import SpectrogramNotesEncoder
    from .continous_encoder import SpectrogramContEncoder
    from .pipeline_spectrogram_diffusion import (
        SpectrogramContEncoder,
        SpectrogramDiffusionPipeline,
        T5FilmDecoder,
    )
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 315 | 0 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow  # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101_122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101_122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3_018, 70_307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[0, 490, 14_328, 4_507, 354, 47, 43_669, 95, 25, 78_117, 20_215, 19_779, 190, 22, 400, 4, 35_343, 80_310, 603, 86, 24_937, 105, 33_438, 94_762, 196, 39_642, 7, 15, 15_933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10_534, 87, 25, 66, 3_358, 196, 55_289, 8, 82_961, 81, 2_204, 75_203, 7, 15, 763, 12_956, 216, 178, 14_328, 9_595, 1_377, 69_693, 7, 448, 71_021, 196, 18_106, 1_437, 13_974, 108, 9_083, 4, 49_315, 7, 39, 86, 1_326, 2_793, 46_333, 4, 448, 196, 74_588, 7, 49_315, 7, 39, 21, 822, 38_470, 74, 21, 66_723, 62_480, 8, 22_050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
| 90 |
"""simple docstring"""
from __future__ import annotations


def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Find all the valid positions a knight can move to from the current position."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position)
    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Check if the board (matrix) has been completely filled with non-zero values."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Helper function to solve the knight tour problem by backtracking."""
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0  # undo the move and backtrack
    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find a solution for the knight tour problem on a board of size n."""
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
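# Usage sketch (hedged): 5x5 is a small board that admits an open knight's tour;
# each cell of the returned board holds the step (1..25) at which it is visited.
tour = open_knight_tour(5)
for row in tour:
    print(row)
# open_knight_tour(2) would raise ValueError: no tour exists on a 2x2 board.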
| 315 | 0 |
"""simple docstring"""
from __future__ import annotations

from collections.abc import Iterator
from typing import Generic, TypeVar

T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self):
        return f"{self.data}"


class LinkedStack(Generic[T]):
    """Stack backed by a singly linked list; `top` points at the last pushed node."""

    def __init__(self):
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self):
        return "->".join([str(item) for item in self])

    def __len__(self):
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None


if __name__ == "__main__":
    from doctest import testmod

    testmod()
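# Usage sketch (hedged): LIFO behavior of the linked-list stack defined above.
stack = LinkedStack[int]()
for value in (1, 2, 3):
    stack.push(value)
print(stack)         # 3->2->1  (top first)
print(stack.pop())   # 3
print(stack.peek())  # 2
print(len(stack))    # 2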
| 91 |
"""simple docstring"""
import math
import time

from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics


if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
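# Wiring sketch (hedged, names below are illustrative placeholders): the
# `post_process_function` turns start/end logits back into answer strings on the
# raw examples before metrics such as SQuAD exact-match / F1 are computed.
# trainer = QuestionAnsweringTrainer(
#     model=model,
#     args=training_args,
#     train_dataset=train_dataset,
#     eval_dataset=eval_features,        # tokenized features
#     eval_examples=eval_examples,       # raw examples, for span reconstruction
#     post_process_function=post_processing_function,
#     compute_metrics=compute_metrics,
# )
# metrics = trainer.evaluate()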
| 315 | 0 |
import platform
from argparse import ArgumentParser

import huggingface_hub

from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand


def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))
        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
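# Usage note (hedged): this subcommand is what backs the `diffusers-cli env`
# entry point; running it prints the environment block that bug reports ask for.
#
#   $ diffusers-cli env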
| 92 |
"""simple docstring"""
def different_signs(num1: int, num2: int) -> bool:
    """Return True if the two integers have opposite signs."""
    return num1 ^ num2 < 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
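# Why XOR works (hedged sketch): in two's complement the sign lives in the most
# significant bit, so num1 ^ num2 is negative exactly when the sign bits differ.
print(different_signs(1, -1))   # True
print(different_signs(-5, -7))  # False
print(different_signs(3, 4))    # False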
| 315 | 0 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
},
"merges_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
},
"tokenizer_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"gpt2": 1_0_2_4,
"gpt2-medium": 1_0_2_4,
"gpt2-large": 1_0_2_4,
"gpt2-xl": 1_0_2_4,
"distilgpt2": 1_0_2_4,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        self.add_bos_token = kwargs.pop("add_bos_token", False)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
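# Usage sketch (hedged, requires network access to fetch the vocab): round-trip
# text through the fast GPT-2 tokenizer via the public transformers entry point.
# from transformers import GPT2TokenizerFast
# tok = GPT2TokenizerFast.from_pretrained("gpt2")
# ids = tok("Hello world").input_ids   # [15496, 995]
# print(tok.decode(ids))               # "Hello world"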
| 93 |
"""simple docstring"""
import unittest

from .lib import (
    Matrix,
    Vector,
    axpy,
    square_zero_matrix,
    unit_basis_vector,
    zero_vector,
)


class Test(unittest.TestCase):
    def test_component(self):
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self):
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self):
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self):
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self):
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self):
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self):
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self):
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self):
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_mul_matrix(self):
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)

    def test_add_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test_sub_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self):
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n", str(square_zero_matrix(5))
        )


if __name__ == "__main__":
    unittest.main()
| 315 | 0 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]


if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model

else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
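# How the lazy module works (hedged sketch of the pattern above, not the exact
# transformers internals): the package replaces itself in sys.modules with a
# proxy module, so heavy submodules are imported only when first accessed.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value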
| 94 |
"""simple docstring"""
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}


class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32_000,
        d_model=1_024,
        n_layer=24,
        n_head=16,
        d_inner=4_096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
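# Usage sketch (hedged): constructing the config defined above and exercising
# the derived fields and the read-only max_position_embeddings property.
config = XLNetConfig(vocab_size=1_000, d_model=64, n_layer=2, n_head=4)
print(config.d_head)                   # 16, derived as d_model // n_head
print(config.hidden_size)              # 64, via the attribute_map alias
print(config.max_position_embeddings)  # -1: XLNet has no fixed length limit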
| 315 | 0 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 512,
"""facebook/dpr-ctx_encoder-multiset-base""": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-question_encoder-single-nq-base""": 512,
"""facebook/dpr-question_encoder-multiset-base""": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-reader-single-nq-base""": 512,
"""facebook/dpr-reader-multiset-base""": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as
            there are `titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(UpperCamelCase__)
class __lowerCAmelCase :
def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> BatchEncoding:
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , **lowerCAmelCase__ , )
elif titles is None or texts is None:
a__ : Optional[Any] =titles if texts is None else texts
return super().__call__(
lowerCAmelCase__ , lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , **lowerCAmelCase__ , )
a__ : Union[str, Any] =titles if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else [titles]
a__ : List[str] =texts if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else [texts]
a__ : int =len(lowerCAmelCase__ )
a__ : str =questions if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else [questions] * n_passages
        assert len(lowerCAmelCase__ ) == len(
            lowerCAmelCase__ ), F'''There should be as many titles as texts but got {len(lowerCAmelCase__ )} titles and {len(lowerCAmelCase__ )} texts.'''
a__ : int =super().__call__(lowerCAmelCase__ , lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ )["input_ids"]
a__ : str =super().__call__(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ )["input_ids"]
a__ : Optional[Any] ={
"input_ids": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowerCAmelCase__ , lowerCAmelCase__ )
]
}
if return_attention_mask is not False:
a__ : List[Any] =[]
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
a__ : Tuple =attention_mask
return self.pad(lowerCAmelCase__ , padding=lowerCAmelCase__ , max_length=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ )
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 1_6 , lowerCAmelCase__ = 6_4 , lowerCAmelCase__ = 4 , ) -> List[DPRSpanPrediction]:
'''simple docstring'''
a__ : Optional[Any] =reader_input["input_ids"]
a__ , a__ , a__ : Union[str, Any] =reader_output[:3]
a__ : Optional[int] =len(lowerCAmelCase__ )
a__ : Optional[Any] =sorted(range(lowerCAmelCase__ ) , reverse=lowerCAmelCase__ , key=relevance_logits.__getitem__ )
a__ : List[DPRReaderOutput] =[]
for doc_id in sorted_docs:
a__ : List[str] =list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
a__ : Union[str, Any] =sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
a__ : int =sequence_ids.index(self.pad_token_id )
else:
a__ : Optional[Any] =len(lowerCAmelCase__ )
a__ : Any =self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowerCAmelCase__ , top_spans=lowerCAmelCase__ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowerCAmelCase__ , start_index=lowerCAmelCase__ , end_index=lowerCAmelCase__ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(lowerCAmelCase__ ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) -> List[DPRSpanPrediction]:
'''simple docstring'''
a__ : Optional[int] =[]
for start_index, start_score in enumerate(lowerCAmelCase__ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
a__ : str =sorted(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : x[1] , reverse=lowerCAmelCase__ )
a__ : Any =[]
for (start_index, end_index), score in scores:
assert start_index <= end_index, F'''Wrong span indices: [{start_index}:{end_index}]'''
a__ : List[Any] =end_index - start_index + 1
assert length <= max_answer_length, F'''Span is too long: {length} > {max_answer_length}'''
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(lowerCAmelCase__ ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(UpperCamelCase__)
class __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__):
_lowercase : List[Any] = VOCAB_FILES_NAMES
_lowercase : List[str] = READER_PRETRAINED_VOCAB_FILES_MAP
_lowercase : str = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase : List[str] = READER_PRETRAINED_INIT_CONFIGURATION
_lowercase : str = ["""input_ids""", """attention_mask"""]
_lowercase : List[str] = DPRReaderTokenizer
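# Illustrative usage sketch (not part of the original module; an assumption-laden
# example, not the definitive API of the classes above). The classes defined here
# mirror the public `DPRReaderTokenizerFast`, so the equivalent public classes
# from transformers are used under their real names; the checkpoint id appears
# in the pretrained maps above.
if __name__ == "__main__":
    from transformers import DPRReader, DPRReaderTokenizerFast

    reader_tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
    reader_model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
    # Encode one question against one (title, text) passage.
    encoded_inputs = reader_tokenizer(
        questions=["What is love?"],
        titles=["Haddaway"],
        texts=["'What Is Love' is a song recorded by the artist Haddaway."],
        return_tensors="pt",
    )
    outputs = reader_model(**encoded_inputs)
    # Rank answer spans with the `decode_best_spans` helper implemented above.
    best_spans = reader_tokenizer.decode_best_spans(encoded_inputs, outputs)
    print(best_spans[0].text)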
| 95 |
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
a = {
'''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'''bert''': (BertConfig, BertForMaskedLM, BertTokenizer),
'''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def _snake_case ( _snake_case : Tuple ) -> Dict:
'''simple docstring'''
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
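# For reference, one argument combination that satisfies every assertion above
# (MLM distillation of BERT into DistilBERT; the values are illustrative, not
# shipped defaults):
#   --mlm --alpha_ce 5.0 --alpha_mlm 2.0 --alpha_clm 0.0 --alpha_cos 1.0
#   --student_type distilbert --teacher_type bert --token_counts <counts.pickle>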
def _snake_case ( _snake_case : str , _snake_case : List[Any] ) -> Tuple:
'''simple docstring'''
if args.student_type == "roberta":
_A = False
elif args.student_type == "gpt2":
_A = False
def _snake_case ( _snake_case : str , _snake_case : int ) -> Tuple:
'''simple docstring'''
if args.student_type == "roberta":
_A = False
def _snake_case ( ) -> Tuple:
'''simple docstring'''
_A = argparse.ArgumentParser(description='Training' )
parser.add_argument('--force' , action='store_true' , help='Overwrite dump_path if it already exists.' )
parser.add_argument(
'--dump_path' , type=_snake_case , required=_snake_case , help='The output directory (log, checkpoints, parameters, etc.)' )
    parser.add_argument(
        '--data_file' , type=_snake_case , required=_snake_case , help='The binarized data file (tokenized + tokens_to_ids), grouped by sequence.' , )
    parser.add_argument(
        '--student_type' , type=_snake_case , choices=['distilbert', 'roberta', 'gpt2'] , required=_snake_case , help='The student type (DistilBERT, RoBERTa, GPT-2).' , )
parser.add_argument('--student_config' , type=_snake_case , required=_snake_case , help='Path to the student configuration.' )
parser.add_argument(
'--student_pretrained_weights' , default=_snake_case , type=_snake_case , help='Load student initialization checkpoint.' )
    parser.add_argument(
        '--teacher_type' , choices=['bert', 'roberta', 'gpt2'] , required=_snake_case , help='Teacher type (BERT, RoBERTa, GPT-2).' )
parser.add_argument('--teacher_name' , type=_snake_case , required=_snake_case , help='The teacher model.' )
    parser.add_argument('--temperature' , default=2.0 , type=_snake_case , help='Temperature for the distillation softmax.' )
parser.add_argument(
'--alpha_ce' , default=0.5 , type=_snake_case , help='Linear weight for the distillation loss. Must be >=0.' )
parser.add_argument(
'--alpha_mlm' , default=0.0 , type=_snake_case , help='Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.' , )
parser.add_argument('--alpha_clm' , default=0.5 , type=_snake_case , help='Linear weight for the CLM loss. Must be >=0.' )
parser.add_argument('--alpha_mse' , default=0.0 , type=_snake_case , help='Linear weight of the MSE loss. Must be >=0.' )
parser.add_argument(
'--alpha_cos' , default=0.0 , type=_snake_case , help='Linear weight of the cosine embedding loss. Must be >=0.' )
parser.add_argument(
'--mlm' , action='store_true' , help='The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.' )
parser.add_argument(
'--mlm_mask_prop' , default=0.15 , type=_snake_case , help='Proportion of tokens for which we need to make a prediction.' , )
parser.add_argument('--word_mask' , default=0.8 , type=_snake_case , help='Proportion of tokens to mask out.' )
parser.add_argument('--word_keep' , default=0.1 , type=_snake_case , help='Proportion of tokens to keep.' )
parser.add_argument('--word_rand' , default=0.1 , type=_snake_case , help='Proportion of tokens to randomly replace.' )
parser.add_argument(
'--mlm_smoothing' , default=0.7 , type=_snake_case , help='Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).' , )
parser.add_argument('--token_counts' , type=_snake_case , help='The token counts in the data_file for MLM.' )
    parser.add_argument(
        '--restrict_ce_to_mask' , action='store_true' , help='If true, compute the distillation loss only on the [MLM] prediction distribution.' , )
parser.add_argument(
'--freeze_pos_embs' , action='store_true' , help='Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.' , )
parser.add_argument(
'--freeze_token_type_embds' , action='store_true' , help='Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.' , )
parser.add_argument('--n_epoch' , type=_snake_case , default=3 , help='Number of pass on the whole dataset.' )
parser.add_argument('--batch_size' , type=_snake_case , default=5 , help='Batch size (for each process).' )
parser.add_argument(
'--group_by_size' , action='store_false' , help='If true, group sequences that have similar length into the same batch. Default is true.' , )
parser.add_argument(
'--gradient_accumulation_steps' , type=_snake_case , default=50 , help='Gradient accumulation for larger training batches.' , )
parser.add_argument('--warmup_prop' , default=0.05 , type=_snake_case , help='Linear warmup proportion.' )
parser.add_argument('--weight_decay' , default=0.0 , type=_snake_case , help='Weight decay if we apply some.' )
parser.add_argument('--learning_rate' , default=5E-4 , type=_snake_case , help='The initial learning rate for Adam.' )
parser.add_argument('--adam_epsilon' , default=1E-6 , type=_snake_case , help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm' , default=5.0 , type=_snake_case , help='Max gradient norm.' )
parser.add_argument('--initializer_range' , default=0.02 , type=_snake_case , help='Random initialization range.' )
parser.add_argument(
'--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , )
parser.add_argument(
'--fp16_opt_level' , type=_snake_case , default='O1' , help=(
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
'See details at https://nvidia.github.io/apex/amp.html'
) , )
parser.add_argument('--n_gpu' , type=_snake_case , default=1 , help='Number of GPUs in the node.' )
parser.add_argument('--local_rank' , type=_snake_case , default=-1 , help='Distributed training - Local rank' )
parser.add_argument('--seed' , type=_snake_case , default=56 , help='Random seed' )
parser.add_argument('--log_interval' , type=_snake_case , default=5_00 , help='Tensorboard logging interval.' )
parser.add_argument('--checkpoint_interval' , type=_snake_case , default=40_00 , help='Checkpoint interval.' )
_A = parser.parse_args()
sanity_checks(_snake_case )
# ARGS #
init_gpu_params(_snake_case )
set_seed(_snake_case )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
                    F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'''
                    ' it. Use `--force` if you want to overwrite it.' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(F'''Param: {args}''' )
with open(os.path.join(args.dump_path , 'parameters.json' ) , 'w' ) as f:
json.dump(vars(_snake_case ) , _snake_case , indent=4 )
git_log(args.dump_path )
_A , _A , _A = MODEL_CLASSES[args.student_type]
_A , _A , _A = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
_A = teacher_tokenizer_class.from_pretrained(args.teacher_name )
_A = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
_A = tokenizer.all_special_tokens.index(_snake_case )
_A = tokenizer.all_special_ids[idx]
logger.info(F'''Special tokens {special_tok_ids}''' )
_A = special_tok_ids
_A = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''' )
with open(args.data_file , 'rb' ) as fp:
_A = pickle.load(_snake_case )
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' )
with open(args.token_counts , 'rb' ) as fp:
_A = pickle.load(_snake_case )
_A = np.maximum(_snake_case , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
_A = 0.0 # do not predict special tokens
_A = torch.from_numpy(_snake_case )
else:
_A = None
_A = LmSeqsDataset(params=_snake_case , data=_snake_case )
logger.info('Data loader created.' )
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''' )
_A = student_config_class.from_pretrained(args.student_config )
_A = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' )
_A = student_model_class.from_pretrained(args.student_pretrained_weights , config=_snake_case )
else:
_A = student_model_class(_snake_case )
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''' )
logger.info('Student loaded.' )
# TEACHER #
_A = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=_snake_case )
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''' )
logger.info(F'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(_snake_case , _snake_case )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(_snake_case , _snake_case )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
_A = Distiller(
params=_snake_case , dataset=_snake_case , token_probs=_snake_case , student=_snake_case , teacher=_snake_case )
distiller.train()
logger.info('Let\'s go get some drinks.' )
if __name__ == "__main__":
main()
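# An illustrative launch command for this script (the file name, paths, and
# hyper-parameter values are placeholders, not values shipped with the script):
#
#   python train.py \
#     --student_type distilbert --student_config student_config.json \
#     --teacher_type bert --teacher_name bert-base-uncased \
#     --mlm --alpha_ce 5.0 --alpha_mlm 2.0 --alpha_clm 0.0 --alpha_cos 1.0 \
#     --data_file data/binarized_text.pickle --token_counts data/token_counts.pickle \
#     --dump_path serialization_dir/distilbert_run --force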
| 315 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"""Salesforce/blip-vqa-base""": """https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json""",
"""Salesforce/blip-vqa-capfit-large""": (
"""https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"""
),
"""Salesforce/blip-image-captioning-base""": (
"""https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"""
),
"""Salesforce/blip-image-captioning-large""": (
"""https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"""
),
"""Salesforce/blip-itm-base-coco""": """https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json""",
"""Salesforce/blip-itm-large-coco""": """https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json""",
"""Salesforce/blip-itm-base-flikr""": """https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json""",
"""Salesforce/blip-itm-large-flikr""": (
"""https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"""
),
}
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = """blip_text_model"""
def __init__( self , lowercase=30524 , lowercase=768 , lowercase=768 , lowercase=3072 , lowercase=768 , lowercase=12 , lowercase=8 , lowercase=512 , lowercase="gelu" , lowercase=1E-12 , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=30522 , lowercase=2 , lowercase=0 , lowercase=102 , lowercase=True , lowercase=True , **lowercase , ):
super().__init__(
pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , sep_token_id=lowercase , **lowercase , )
_lowerCamelCase : List[str] = vocab_size
_lowerCamelCase : Tuple = hidden_size
_lowerCamelCase : Optional[int] = encoder_hidden_size
_lowerCamelCase : List[Any] = intermediate_size
_lowerCamelCase : Optional[Any] = projection_dim
_lowerCamelCase : Tuple = hidden_dropout_prob
_lowerCamelCase : Tuple = num_hidden_layers
_lowerCamelCase : int = num_attention_heads
_lowerCamelCase : Optional[Any] = max_position_embeddings
_lowerCamelCase : Optional[int] = layer_norm_eps
_lowerCamelCase : List[Any] = hidden_act
_lowerCamelCase : Tuple = initializer_range
_lowerCamelCase : int = attention_probs_dropout_prob
_lowerCamelCase : List[str] = is_decoder
_lowerCamelCase : Optional[Any] = use_cache
@classmethod
def A_ ( cls , lowercase , **lowercase ):
cls._set_token_in_kwargs(lowercase )
_lowerCamelCase, _lowerCamelCase : int = cls.get_config_dict(lowercase , **lowercase )
# get the text config dict if we are loading from BlipConfig
if config_dict.get('model_type' ) == "blip":
_lowerCamelCase : Union[str, Any] = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(lowercase , **lowercase )
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = """blip_vision_model"""
def __init__( self , lowercase=768 , lowercase=3072 , lowercase=512 , lowercase=12 , lowercase=12 , lowercase=384 , lowercase=16 , lowercase="gelu" , lowercase=1E-5 , lowercase=0.0 , lowercase=1E-10 , **lowercase , ):
super().__init__(**lowercase )
_lowerCamelCase : Union[str, Any] = hidden_size
_lowerCamelCase : List[str] = intermediate_size
_lowerCamelCase : Any = projection_dim
_lowerCamelCase : List[Any] = num_hidden_layers
_lowerCamelCase : Any = num_attention_heads
_lowerCamelCase : List[Any] = patch_size
_lowerCamelCase : Union[str, Any] = image_size
_lowerCamelCase : Optional[int] = initializer_range
_lowerCamelCase : Optional[int] = attention_dropout
_lowerCamelCase : List[Any] = layer_norm_eps
_lowerCamelCase : Tuple = hidden_act
@classmethod
def A_ ( cls , lowercase , **lowercase ):
cls._set_token_in_kwargs(lowercase )
_lowerCamelCase, _lowerCamelCase : List[Any] = cls.get_config_dict(lowercase , **lowercase )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get('model_type' ) == "blip":
_lowerCamelCase : List[Any] = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(lowercase , **lowercase )
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = """blip"""
lowerCamelCase__ = True
def __init__( self , lowercase=None , lowercase=None , lowercase=512 , lowercase=2.65_92 , lowercase=256 , **lowercase , ):
super().__init__(**lowercase )
if text_config is None:
_lowerCamelCase : int = {}
logger.info('`text_config` is `None`. Initializing the `BlipTextConfig` with default values.' )
if vision_config is None:
_lowerCamelCase : Tuple = {}
logger.info('`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.' )
_lowerCamelCase : Dict = BlipTextConfig(**lowercase )
_lowerCamelCase : Optional[Any] = BlipVisionConfig(**lowercase )
_lowerCamelCase : int = self.vision_config.hidden_size
_lowerCamelCase : List[str] = projection_dim
_lowerCamelCase : List[Any] = logit_scale_init_value
_lowerCamelCase : str = 1.0
_lowerCamelCase : List[Any] = 0.02
_lowerCamelCase : List[Any] = image_text_hidden_size
@classmethod
def A_ ( cls , lowercase , lowercase , **lowercase ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase )
def A_ ( self ):
_lowerCamelCase : Dict = copy.deepcopy(self.__dict__ )
_lowerCamelCase : Optional[Any] = self.text_config.to_dict()
_lowerCamelCase : str = self.vision_config.to_dict()
_lowerCamelCase : Optional[Any] = self.__class__.model_type
return output
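# A short composition sketch (not part of the original module; an illustrative
# example only). The three classes above mirror `BlipTextConfig`,
# `BlipVisionConfig`, and `BlipConfig` from the public transformers API, which
# are used here under their real names.
if __name__ == "__main__":
    from transformers import BlipConfig, BlipTextConfig, BlipVisionConfig

    text_config = BlipTextConfig(hidden_size=768, num_hidden_layers=12)
    vision_config = BlipVisionConfig(image_size=384, patch_size=16)
    # `from_text_vision_configs` corresponds to the first classmethod of the
    # composite config above.
    config = BlipConfig.from_text_vision_configs(text_config, vision_config)
    print(config.model_type, config.projection_dim)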
| 96 |
"""simple docstring"""
from manim import *
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : Dict ):
_A = Rectangle(height=0.5 , width=0.5 )
_A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_A = Rectangle(height=0.25 , width=0.25 )
_A = [mem.copy() for i in range(6 )]
_A = [mem.copy() for i in range(6 )]
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = Text('CPU' , font_size=24 )
_A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_UpperCAmelCase )
_A = [mem.copy() for i in range(4 )]
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = Text('GPU' , font_size=24 )
_A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(_UpperCAmelCase )
_A = [mem.copy() for i in range(6 )]
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = Text('Model' , font_size=24 )
_A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(_UpperCAmelCase )
_A = []
_A = []
for i, rect in enumerate(_UpperCAmelCase ):
_A = fill.copy().set_fill(_UpperCAmelCase , opacity=0.8 )
target.move_to(_UpperCAmelCase )
model_arr.append(_UpperCAmelCase )
_A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_UpperCAmelCase , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(_UpperCAmelCase )
self.add(*_UpperCAmelCase , *_UpperCAmelCase )
_A = [meta_mem.copy() for i in range(6 )]
_A = [meta_mem.copy() for i in range(6 )]
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = Text('Disk' , font_size=24 )
_A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
disk.move_to([-4, -1.25, 0] )
self.add(_UpperCAmelCase , _UpperCAmelCase )
_A = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_A = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_UpperCAmelCase , _UpperCAmelCase )
_A = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(_UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_UpperCAmelCase )
_A = MarkupText(
F'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase ) )
_A = Square(0.3 )
input.set_fill(_UpperCAmelCase , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , _UpperCAmelCase , buff=0.5 )
self.play(Write(_UpperCAmelCase ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=_UpperCAmelCase , buff=0.02 )
self.play(MoveToTarget(_UpperCAmelCase ) )
self.play(FadeOut(_UpperCAmelCase ) )
_A = Arrow(start=_UpperCAmelCase , end=_UpperCAmelCase , color=_UpperCAmelCase , buff=0.5 )
a.next_to(model_arr[0].get_left() , _UpperCAmelCase , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
_A = MarkupText(
F'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase , run_time=3 ) )
_A = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(_UpperCAmelCase ) , Circumscribe(model_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
_A = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , _UpperCAmelCase , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
_A = AnimationGroup(
FadeOut(_UpperCAmelCase , run_time=0.5 ) , MoveToTarget(_UpperCAmelCase , run_time=0.5 ) , FadeIn(_UpperCAmelCase , run_time=0.5 ) , lag_ratio=0.2 )
self.play(_UpperCAmelCase )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
_A = 0.7
self.play(
Circumscribe(model_arr[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_arr[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
_A = a_c
_A = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(_UpperCAmelCase ) , FadeOut(_UpperCAmelCase , run_time=0.5 ) , )
_A = MarkupText(F'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase , run_time=3 ) , MoveToTarget(_UpperCAmelCase ) )
self.wait()
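# To preview the scene above with the Manim CLI (the file and scene names are
# illustrative, since the original class name was mangled):
#
#   manim -pql big_model_inference.py <SceneClassName>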
| 315 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = tempfile.mkdtemp()
# fmt: off
UpperCamelCase__ :List[str] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
# fmt: on
UpperCamelCase__ :Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
UpperCamelCase__ :int = {
'''do_resize''': True,
'''size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.5, 0.5, 0.5],
'''image_std''': [0.5, 0.5, 0.5],
}
UpperCamelCase__ :Union[str, Any] = os.path.join(self.tmpdirname , UpperCamelCase_ )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self , **UpperCamelCase_ ):
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self , **UpperCamelCase_ ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
UpperCamelCase__ :List[Any] = [Image.fromarray(np.moveaxis(UpperCamelCase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Any = self.get_tokenizer()
UpperCamelCase__ :Dict = self.get_image_processor()
UpperCamelCase__ :str = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase__ :Union[str, Any] = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Any = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase__ :Tuple = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
UpperCamelCase__ :Union[str, Any] = self.get_image_processor(do_normalize=UpperCamelCase_ , padding_value=1.0 )
UpperCamelCase__ :Any = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCamelCase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = self.get_image_processor()
UpperCamelCase__ :List[Any] = self.get_tokenizer()
UpperCamelCase__ :Dict = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = self.prepare_image_inputs()
UpperCamelCase__ :List[str] = image_processor(UpperCamelCase_ , return_tensors='''np''' )
UpperCamelCase__ :Optional[int] = processor(images=UpperCamelCase_ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = self.get_image_processor()
UpperCamelCase__ :List[str] = self.get_tokenizer()
UpperCamelCase__ :List[Any] = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
UpperCamelCase__ :Any = '''lower newer'''
UpperCamelCase__ :Optional[Any] = processor(text=UpperCamelCase_ )
UpperCamelCase__ :Tuple = tokenizer(UpperCamelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :str = self.get_image_processor()
UpperCamelCase__ :str = self.get_tokenizer()
UpperCamelCase__ :Tuple = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
UpperCamelCase__ :List[Any] = '''lower newer'''
UpperCamelCase__ :Optional[int] = self.prepare_image_inputs()
UpperCamelCase__ :Optional[int] = processor(text=UpperCamelCase_ , images=UpperCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with self.assertRaises(UpperCamelCase_ ):
processor()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = self.get_image_processor()
UpperCamelCase__ :List[str] = self.get_tokenizer()
UpperCamelCase__ :Tuple = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
UpperCamelCase__ :List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase__ :List[str] = processor.batch_decode(UpperCamelCase_ )
UpperCamelCase__ :Any = tokenizer.batch_decode(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = self.get_image_processor()
UpperCamelCase__ :List[Any] = self.get_tokenizer()
UpperCamelCase__ :int = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
UpperCamelCase__ :List[str] = '''lower newer'''
UpperCamelCase__ :Tuple = self.prepare_image_inputs()
UpperCamelCase__ :Dict = processor(text=UpperCamelCase_ , images=UpperCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
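# These unit tests run under pytest with the vision and tokenizers extras
# installed (the file path below is illustrative):
#
#   pytest -q tests/models/vision_text_dual_encoder/test_processor_vision_text_dual_encoder.py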
| 97 |
"""simple docstring"""
def bubble_sort(list_data: list, length: int = 0) -> list:
    '''simple docstring'''
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
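    # Quick smoke test of the recursive bubble sort above (illustrative values):
    print(bubble_sort([5, 1, 4, 2, 8]))  # [1, 2, 4, 5, 8]
    print(bubble_sort([-2, 0, -9, 3]))  # [-9, -2, 0, 3]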
| 315 | 0 |
"""simple docstring"""
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = args.log_outputs
UpperCAmelCase__ = '_'.join(args.dataset.split('/' ) + [args.config, args.split] )
# load metric
UpperCAmelCase__ = load_metric('wer' )
UpperCAmelCase__ = load_metric('cer' )
# compute metrics
UpperCAmelCase__ = wer.compute(references=result['target'] , predictions=result['prediction'] )
UpperCAmelCase__ = cer.compute(references=result['target'] , predictions=result['prediction'] )
# print & log results
UpperCAmelCase__ = f'''WER: {wer_result}\nCER: {cer_result}'''
print(lowerCamelCase )
with open(f'''{dataset_id}_eval_results.txt''' , 'w' ) as f:
f.write(lowerCamelCase )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
UpperCAmelCase__ = f'''log_{dataset_id}_predictions.txt'''
UpperCAmelCase__ = f'''log_{dataset_id}_targets.txt'''
with open(lowerCamelCase , 'w' ) as p, open(lowerCamelCase , 'w' ) as t:
# mapping function to write output
def write_to_file(lowerCamelCase , lowerCamelCase ):
p.write(f'''{i}''' + '\n' )
p.write(batch['prediction'] + '\n' )
t.write(f'''{i}''' + '\n' )
t.write(batch['target'] + '\n' )
result.map(lowerCamelCase , with_indices=lowerCamelCase )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = '[,?.!\-\;\:"“%‘”�—’…–]' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
UpperCAmelCase__ = re.sub(lowerCamelCase , '' , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
UpperCAmelCase__ = ['\n\n', '\n', ' ', ' ']
for t in token_sequences_to_ignore:
UpperCAmelCase__ = ' '.join(text.split(lowerCamelCase ) )
return text
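# Worked example for the cleaning function above (the name `normalize_text`
# comes from its call site below): normalize_text("Hello, World!") -> "hello world"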
def a_ ( lowerCamelCase ):
# load dataset
UpperCAmelCase__ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=lowerCamelCase )
    # for testing: only process the first few examples by uncommenting the line below
    # dataset = dataset.select(range(10))
# load processor
UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(args.model_id )
UpperCAmelCase__ = feature_extractor.sampling_rate
# resample audio
UpperCAmelCase__ = dataset.cast_column('audio' , Audio(sampling_rate=lowerCamelCase ) )
# load eval pipeline
if args.device is None:
UpperCAmelCase__ = 0 if torch.cuda.is_available() else -1
UpperCAmelCase__ = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(lowerCamelCase ):
UpperCAmelCase__ = asr(
batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
UpperCAmelCase__ = prediction['text']
UpperCAmelCase__ = normalize_text(batch['sentence'] )
return batch
# run inference on all examples
UpperCAmelCase__ = dataset.map(lowerCamelCase , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(lowerCamelCase , lowerCamelCase )
if __name__ == "__main__":
lowerCAmelCase__ : Dict = argparse.ArgumentParser()
parser.add_argument(
'--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
)
parser.add_argument(
'--dataset',
type=str,
required=True,
help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
)
parser.add_argument(
'--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
)
parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
    parser.add_argument(
        '--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to None (no chunking); 5 seconds is a reasonable value for long audio.'
    )
    parser.add_argument(
        '--stride_length_s', type=float, default=None, help='Stride of the audio chunks in seconds. Defaults to None; 1 second is a reasonable value.'
    )
parser.add_argument(
'--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
)
parser.add_argument(
'--device',
type=int,
default=None,
help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
)
lowerCAmelCase__ : Dict = parser.parse_args()
main(args)
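# An illustrative invocation (the file name, model, and dataset identifiers are
# placeholders):
#
#   python eval.py \
#     --model_id <your-wav2vec2-checkpoint> \
#     --dataset mozilla-foundation/common_voice_8_0 --config en --split test \
#     --chunk_length_s 5.0 --stride_length_s 1.0 --log_outputs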
| 98 |
"""simple docstring"""
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
a = logging.get_logger(__name__)
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : Any = ['''input_values''', '''attention_mask''']
def __init__( self : Dict , _UpperCAmelCase : int = 1 , _UpperCAmelCase : int = 16_000 , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : bool = False , _UpperCAmelCase : int = 80 , _UpperCAmelCase : int = 16 , _UpperCAmelCase : int = 64 , _UpperCAmelCase : str = "hann_window" , _UpperCAmelCase : float = 1.0 , _UpperCAmelCase : float = 80 , _UpperCAmelCase : float = 7_600 , _UpperCAmelCase : float = 1E-1_0 , _UpperCAmelCase : int = 2 , _UpperCAmelCase : bool = True , **_UpperCAmelCase : List[Any] , ):
super().__init__(feature_size=_UpperCAmelCase , sampling_rate=_UpperCAmelCase , padding_value=_UpperCAmelCase , **_UpperCAmelCase )
_A = do_normalize
_A = return_attention_mask
_A = num_mel_bins
_A = hop_length
_A = win_length
_A = win_function
_A = frame_signal_scale
_A = fmin
_A = fmax
_A = mel_floor
_A = reduction_factor
_A = win_length * sampling_rate // 1_000
_A = hop_length * sampling_rate // 1_000
_A = optimal_fft_length(self.sample_size )
_A = (self.n_fft // 2) + 1
_A = window_function(window_length=self.sample_size , name=self.win_function , periodic=_UpperCAmelCase )
_A = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='slaney' , mel_scale='slaney' , )
if frame_signal_scale != 1.0:
warnings.warn(
'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers' , _UpperCAmelCase , )
if reduction_factor != 2.0:
warnings.warn(
'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers' , _UpperCAmelCase , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def lowerCAmelCase_ ( _UpperCAmelCase : List[np.ndarray] , _UpperCAmelCase : List[np.ndarray] , _UpperCAmelCase : float = 0.0 ):
if attention_mask is not None:
_A = np.array(_UpperCAmelCase , np.intaa )
_A = []
for vector, length in zip(_UpperCAmelCase , attention_mask.sum(-1 ) ):
_A = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
_A = padding_value
normed_input_values.append(_UpperCAmelCase )
else:
_A = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : np.ndarray , ):
_A = spectrogram(
_UpperCAmelCase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='log10' , )
return log_mel_spec.T
def __call__( self : int , _UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : Optional[int] = None , **_UpperCAmelCase : Optional[int] , ):
if audio is None and audio_target is None:
raise ValueError('You must provide either `audio` or `audio_target` values.' )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
if audio is not None:
_A = self._process_audio(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , )
else:
_A = None
if audio_target is not None:
_A = self._process_audio(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , )
if inputs is None:
return inputs_target
else:
_A = inputs_target['input_values']
_A = inputs_target.get('attention_mask' )
if decoder_attention_mask is not None:
_A = decoder_attention_mask
return inputs
def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _UpperCAmelCase : bool = False , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , **_UpperCAmelCase : List[Any] , ):
_A = isinstance(_UpperCAmelCase , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
_A = is_batched_numpy or (
isinstance(_UpperCAmelCase , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_A = [np.asarray(_UpperCAmelCase , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(_UpperCAmelCase , np.ndarray ):
_A = np.asarray(_UpperCAmelCase , dtype=np.floataa )
elif isinstance(_UpperCAmelCase , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
_A = speech.astype(np.floataa )
# always return batch
if not is_batched:
_A = [speech]
# needed to make pad() work on spectrogram inputs
_A = self.feature_size
# convert into correct format for padding
if is_target:
_A = [self._extract_mel_features(_UpperCAmelCase ) for waveform in speech]
_A = BatchFeature({'input_values': features} )
_A = self.num_mel_bins
else:
_A = BatchFeature({'input_values': speech} )
_A = self.pad(
_UpperCAmelCase , padding=_UpperCAmelCase , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , **_UpperCAmelCase , )
_A = feature_size_hack
# convert input values to correct format
_A = padded_inputs['input_values']
if not isinstance(input_values[0] , np.ndarray ):
_A = [np.asarray(_UpperCAmelCase , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(_UpperCAmelCase , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
_A = [array.astype(np.floataa ) for array in input_values]
elif isinstance(_UpperCAmelCase , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
_A = input_values.astype(np.floataa )
# convert attention_mask to correct format
_A = padded_inputs.get('attention_mask' )
if attention_mask is not None:
_A = [np.asarray(_UpperCAmelCase , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
_A = (
attention_mask
if self._get_padding_strategies(_UpperCAmelCase , max_length=_UpperCAmelCase ) is not PaddingStrategy.DO_NOT_PAD
else None
)
_A = self.zero_mean_unit_var_norm(
padded_inputs['input_values'] , attention_mask=_UpperCAmelCase , padding_value=self.padding_value )
if return_tensors is not None:
_A = padded_inputs.convert_to_tensors(_UpperCAmelCase )
return padded_inputs
def lowerCAmelCase_ ( self : Any ):
_A = super().to_dict()
# Don't serialize these as they are derived from the other properties.
_A = ['window', 'mel_filters', 'sample_size', 'sample_stride', 'n_fft', 'n_freqs']
for name in names:
if name in output:
del output[name]
return output
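# A minimal usage sketch (not part of the original module; illustrative only).
# The class above mirrors the public `SpeechT5FeatureExtractor`, used here under
# its real name with the standard Hub checkpoint; the waveform is dummy data.
if __name__ == "__main__":
    from transformers import SpeechT5FeatureExtractor

    extractor = SpeechT5FeatureExtractor.from_pretrained("microsoft/speecht5_tts")
    waveform = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
    inputs = extractor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
    # `audio_target` routes through the log-mel spectrogram branch defined above.
    targets = extractor(audio_target=waveform, sampling_rate=16_000, return_tensors="pt")
    print(inputs["input_values"].shape, targets["input_values"].shape)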
| 315 | 0 |
from manim import *
class A__ ( __UpperCAmelCase ):
"""simple docstring"""
def __lowercase ( self) -> Tuple:
'''simple docstring'''
a__ : List[Any] = Rectangle(height=0.5 , width=0.5)
a__ : str = Rectangle(height=0.46 , width=0.46).set_stroke(width=0)
a__ : Any = Rectangle(height=0.25 , width=0.25)
a__ : List[Any] = [mem.copy() for i in range(6)]
a__ : Optional[Any] = [mem.copy() for i in range(6)]
a__ : Tuple = VGroup(*lowercase).arrange(lowercase , buff=0)
a__ : Dict = VGroup(*lowercase).arrange(lowercase , buff=0)
a__ : List[Any] = VGroup(lowercase , lowercase).arrange(lowercase , buff=0)
a__ : List[str] = Text('CPU' , font_size=24)
        # NOTE: this scene continues setup code from earlier in the file (mem, fill,
        # meta_mem, cpu_rects, cpu_text, cpu_left_col_base, ... are defined above).
        # Identifiers below are reconstructed from how they are used later; a few
        # manim direction/color constants are best-effort guesses, since the
        # original names were obfuscated.
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []
        for i, rect in enumerate(model_base):
            target = fill.copy().set_fill(BLUE, opacity=0.8)
            target.move_to(rect)
            model_arr.append(target)

            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(BLUE, opacity=0.8)
            cpu_target.move_to(cpu_left_col_base[i])
            model_cpu_arr.append(cpu_target)
        self.add(*model_arr, *model_cpu_arr)

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4, -1.25, 0])
        self.add(disk_text, disk_rects)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_1 = MarkupText(
            "Now watch as an input is passed through the model\nand how the memory is utilized and handled.",
            font_size=24,
        )
        step_1.move_to([2, 2, 0])
        self.play(Write(step_1))

        input = Square(0.3)
        input.set_fill(RED, opacity=1.0)
        input.set_stroke(width=0.0)
        input.next_to(model_base[0], LEFT, buff=0.5)
        self.play(Write(input))

        input.generate_target()
        input.target.next_to(model_arr[0], direction=LEFT, buff=0.02)
        self.play(MoveToTarget(input))
        self.play(FadeOut(step_1))

        a = Arrow(start=UP, end=DOWN, color=RED, buff=0.5)
        a.next_to(model_arr[0].get_left(), UP, buff=0.2)

        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0])

        step_2 = MarkupText(
            "As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.",
            font_size=24,
        )
        step_2.move_to([2, 2, 0])
        self.play(Write(step_2, run_time=3))

        circ_kwargs = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}
        self.play(
            Write(a),
            Circumscribe(model_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(model_cpu_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
        )
        self.play(MoveToTarget(model_cpu_arr[0]))

        a_c = a.copy()
        for i in range(6):
            a_c.next_to(model_arr[i].get_right() + 0.02, UP, buff=0.2)

            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02)

            grp = AnimationGroup(
                FadeOut(a, run_time=0.5),
                MoveToTarget(input, run_time=0.5),
                FadeIn(a_c, run_time=0.5),
                lag_ratio=0.2,
            )
            self.play(grp)

            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i])

            if i < 5:
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0])
                if i >= 1:
                    circ_kwargs["run_time"] = 0.7
                self.play(
                    Circumscribe(model_arr[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i + 1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                    Circumscribe(model_arr[i + 1], color=ORANGE, **circ_kwargs),
                )
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i]),
                        MoveToTarget(model_cpu_arr[i + 1]),
                    )
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i], run_time=0.7),
                        MoveToTarget(model_cpu_arr[i + 1], run_time=0.7),
                    )
            else:
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1])
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right(), RIGHT + 0.02, buff=0.2)

                self.play(
                    Circumscribe(model_arr[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(cpu_left_col_base[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                )
                self.play(MoveToTarget(model_cpu_arr[i]))

            a = a_c
            a_c = a_c.copy()

        input.generate_target()
        input.target.next_to(model_base[-1], RIGHT + 0.02, buff=0.5)
        self.play(
            FadeOut(step_2),
            FadeOut(a_c, run_time=0.5),
        )

        step_3 = MarkupText("Inference on a model too large for GPU memory\nis successfully completed.", font_size=24)
        step_3.move_to([2, 2, 0])

        self.play(Write(step_3, run_time=3), MoveToTarget(input))
        self.wait()
| 99 |
"""simple docstring"""
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """Return all k-element combinations of the numbers 1..n."""
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    """Recursively extend current_list, appending complete combinations to total_list."""
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for state in total_list:
        print(*state)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
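
# Quick sanity check of the restored helpers above (hypothetical usage);
# combinations come out in lexicographic order:
#
#     >>> generate_all_combinations(4, 2)
#     [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]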
| 315 | 0 |
"""simple docstring"""
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
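
# Hypothetical invocation (paths and model name are illustrative); `fire` maps
# the CLI flags onto the function arguments above:
#
#     python save_len_file.py --tokenizer_name facebook/bart-large \
#         --data_dir ./cnn_dm --max_source_length 1024 --max_target_length 56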
| 100 |
"""simple docstring"""
def solution(n: int = 1000) -> int:
    """Project Euler 120: for 3 <= a <= n, the maximum remainder when
    (a - 1)**k + (a + 1)**k is divided by a**2 is 2 * a * ((a - 1) // 2).
    Return the sum of these maxima."""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
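
# Sketch of a brute-force cross-check (assumption: the closed form above comes
# from r_max(a) = max over k of ((a - 1)**k + (a + 1)**k) % a**2; the sequence
# is periodic in k, so checking k up to 2*a suffices):
def _brute_force_r_max(a: int) -> int:
    return max(((a - 1) ** k + (a + 1) ** k) % a**2 for k in range(1, 2 * a + 1))


# e.g. all(_brute_force_r_max(a) == 2 * a * ((a - 1) // 2) for a in range(3, 50)) -> True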
| 315 | 0 |
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Find the shortest path between `start` and `goal` nodes using BFS."""
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Return the number of edges on a shortest path from `start` to `target`."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(queue)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, "G", "D")) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, "G", "D")) # returns 4
| 101 |
"""simple docstring"""
import flax.linen as nn
import jax.numpy as jnp

from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D


class FlaxCrossAttnDownBlock2D(nn.Module):
    """Cross-attention 2D downsampling block."""

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxDownBlock2D(nn.Module):
    """Plain 2D downsampling block (no attention)."""

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype
            )
            resnets.append(res_block)
        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxCrossAttnUpBlock2D(nn.Module):
    """Cross-attention 2D upsampling block."""

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUpBlock2D(nn.Module):
    """Plain 2D upsampling block (no attention)."""

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    """Cross-attention middle block of the 2D UNet."""

    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype
            )
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
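
# Minimal usage sketch (illustrative, not part of the diffusers source): Flax
# modules build their parameters lazily via `init`, so a block can be exercised
# on dummy NHWC tensors. Shapes and the time-embedding width below are assumptions.
#
#     import jax
#     block = FlaxDownBlock2D(in_channels=32, out_channels=64, num_layers=2)
#     sample = jnp.zeros((1, 64, 64, 32))   # (batch, height, width, channels)
#     temb = jnp.zeros((1, 128))            # time embedding; width is illustrative
#     params = block.init(jax.random.PRNGKey(0), sample, temb)
#     hidden_states, output_states = block.apply(params, sample, temb)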
| 315 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}


class DonutSwinConfig(PretrainedConfig):
    """Configuration class for the Donut Swin encoder."""

    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
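
# Usage sketch: with the defaults above (embed_dim=96, depths=[2, 2, 6, 2]) the
# derived channel dimension after the last stage is 96 * 2 ** 3 = 768.
#
#     config = DonutSwinConfig()
#     assert config.hidden_size == 768
#     assert config.num_layers == 4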
| 102 |
"""simple docstring"""
import numpy
class TwoHiddenLayerNeuralNetwork:
    """Fully connected network with two hidden layers (4 and 3 nodes)."""

    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        # Input values provided for training the model.
        self.input_array = input_array

        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.

        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )

        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        """Propagate the input through both hidden layers and return the output activation."""
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )

        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        """Update all three weight matrices by backpropagating the prediction error."""
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer
            ),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer
                ),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        """Run feedforward and backpropagation for the given number of iterations."""
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        """Predict the (binary) output for a single input row."""
        # Input values for which the predictions are to be made.
        self.array = input_arr

        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    """Logistic sigmoid, applied element-wise."""
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    """Derivative of the sigmoid, computed from the sigmoid activation itself."""
    return (value) * (1 - (value))


def example() -> int:
    """Train on the 3-bit parity (XOR) truth table and predict one row."""
    # Input values.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input, output_array=output
    )

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)

    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example()
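
# Note on sigmoid_derivative above: each layer stores sigmoid activations, so the
# derivative is evaluated from the activation itself, sigma'(x) = sigma(x) * (1 - sigma(x)),
# hence `value * (1 - value)`. A quick numeric cross-check (illustrative):
#
#     >>> s = sigmoid(numpy.array(0.5))
#     >>> numeric = (sigmoid(numpy.array(0.5 + 1e-6)) - sigmoid(numpy.array(0.5 - 1e-6))) / 2e-6
#     >>> bool(abs(sigmoid_derivative(s) - numeric) < 1e-6)
#     True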
| 315 | 0 |
from ..utils import DummyObject, requires_backends
class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
| 103 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    """Stack implemented on top of a singly linked list."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
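
# Usage sketch for the linked stack above (mirrors the restored API):
#
#     >>> stack = LinkedStack[int]()
#     >>> stack.is_empty()
#     True
#     >>> stack.push(5)
#     >>> stack.push(9)
#     >>> str(stack)
#     '9->5'
#     >>> stack.pop()
#     9
#     >>> stack.peek()
#     5
#     >>> len(stack)
#     1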
| 315 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
    from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox_japanese import (
            GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXJapaneseForCausalLM,
            GPTNeoXJapaneseLayer,
            GPTNeoXJapaneseModel,
            GPTNeoXJapanesePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
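
# Behaviour sketch: `_LazyModule` replaces this package module in `sys.modules`,
# so the torch-backed submodule is only imported on first attribute access, e.g.
#
#     from transformers import GPTNeoXJapaneseConfig   # modeling code loads on demand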
| 104 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 315 | 0 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether CP is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True
    return False


def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids


def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        required=False,
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp",
        required=False,
        type=str,
        default="./resources/ltp",
        help="resources for LTP tokenizer, usually a path",
    )
    parser.add_argument(
        "--bert",
        required=False,
        type=str,
        default="./resources/robert",
        help="resources for Bert tokenizer",
    )
    parser.add_argument(
        "--save_path",
        required=False,
        type=str,
        default="./resources/ref.txt",
        help="path to save res",
    )
    args = parser.parse_args()
    main(args)
| 105 |
"""simple docstring"""
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return a sorted-letter signature of `word`."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every anagram of the given word."""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
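
# Worked example (results depend on the contents of words.txt):
#
#     >>> signature("listen")
#     'eilnst'
#     >>> anagram("listen")   # e.g. ['enlist', 'listen', 'silent'] if all appear in words.txt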
| 315 | 0 |
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None


class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str():
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
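
# This formatter is what backs `Dataset.with_format("jax")` in `datasets`; an
# illustrative round trip:
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("jax")
#     ds[0]["x"]   # -> jax.Array of shape (2,)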
| 106 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )


@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )


def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Load pretrained model and image processor
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # adapt config
    config.update(
        {
            "mask_ratio": model_args.mask_ratio,
            "norm_pix_loss": model_args.norm_pix_loss,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    def preprocess_images(examples):
        """Preprocess a batch of images by applying transforms."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
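
# Illustrative launch command (flag names map onto the dataclass fields above;
# paths and hyperparameters are placeholders):
#
#     python run_mae.py --dataset_name cifar10 --output_dir ./vit-mae-demo \
#         --do_train --do_eval --base_learning_rate 1.5e-4 --mask_ratio 0.75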
| 315 | 0 |
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_audio_spectrogram_transformer_config(model_name):
    config = ASTConfig()

    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def rename_key(name):
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name


def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            # the fused qkv matrix is split back into separate query/key/value
            # projections (target key names follow the HF AST layout)
            prefix = f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention."
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def remove_keys(state_dict):
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original AST weights into the HF AST structure."""
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }

    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()

    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint",
            filename="sample_audio.flac",
            repo_type="dataset",
        )

        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits

    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="ast-finetuned-audioset-10-10-0.4593",
        type=str,
        help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
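
# Illustrative invocation (the script filename below is an assumption):
#
#     python convert_audio_spectrogram_transformer_original_to_pytorch.py \
#         --model_name ast-finetuned-audioset-10-10-0.4593 \
#         --pytorch_dump_folder_path ./ast-converted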
| 107 |
"""simple docstring"""
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)
name_width = 50 # max width of layer names
qname_width = 70 # max width of quantizer names
def add_arguments(parser ) -> None:
    '''simple docstring'''
    group = parser.add_argument_group('quant_trainer arguments' )
    group.add_argument('--wprec' , type=int , default=8 , help='weight precision' )
    group.add_argument('--aprec' , type=int , default=8 , help='activation precision' )
    group.add_argument('--quant-per-tensor' , action='store_true' , help='per tensor weight scaling' )
    group.add_argument('--quant-disable' , action='store_true' , help='disable all quantizers' )
    group.add_argument('--quant-disable-embeddings' , action='store_true' , help='disable all embeddings quantizers' )
    group.add_argument('--quant-disable-keyword' , type=str , nargs='+' , help='disable quantizers by keyword' )
    group.add_argument('--quant-disable-layer-module' , type=str , help='disable quantizers by keyword under layer.' )
    group.add_argument('--quant-enable-layer-module' , type=str , help='enable quantizers by keyword under layer' )
    group.add_argument('--calibrator' , default='max' , help='which quantization range calibrator to use' )
    group.add_argument('--percentile' , default=None , type=float , help='percentile for PercentileCalibrator' )
    group.add_argument('--fuse-qkv' , action='store_true' , help='use the same scale factor for qkv' )
    group.add_argument('--clip-gelu' , metavar='N' , type=float , help='clip gelu output maximum value to N' )
    group.add_argument(
        '--recalibrate-weights' , action='store_true' , help=(
            'recalibrate weight amaxes by taking the max of the weights.'
            ' amaxes will be computed with the current quantization granularity (axis).'
        ) , )
def set_default_quantizers(args ) -> None:
    '''simple docstring'''
    if args.calibrator == "max":
        calib_method = 'max'
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError('Specify --percentile when using percentile calibrator' )
        calib_method = 'histogram'
    elif args.calibrator == "mse":
        calib_method = 'histogram'
    else:
        raise ValueError(F'''Invalid calibrator {args.calibrator}''' )
    input_desc = QuantDescriptor(num_bits=args.aprec , calib_method=calib_method )
    weight_desc = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc )
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc )
def configure_model(model , args , calib=False , eval=False ) -> None:
    '''simple docstring'''
    logger.info('Configuring Model for Quantization' )
    logger.info(F'''using quantization package {pytorch_quantization.__file__}''' )
    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model , ['embeddings'] , which='weight' , _disabled=True )
        if args.quant_disable:
            set_quantizer_by_name(model , [''] , _disabled=True )
        if args.quant_disable_keyword:
            set_quantizer_by_name(model , args.quant_disable_keyword , _disabled=True )
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model , [R'layer.\d+.' + args.quant_disable_layer_module] , _disabled=True )
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model , [R'layer.\d+.' + args.quant_enable_layer_module] , _disabled=False )
        if args.recalibrate_weights:
            recalibrate_weights(model )
        if args.fuse_qkv:
            fuse_qkv(model , args )
        if args.clip_gelu:
            clip_gelu(model , args.clip_gelu )
    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model )
def enable_calibration(model ) -> None:
'''simple docstring'''
logger.info('Enabling Calibration' )
for name, module in model.named_modules():
if name.endswith('_quantizer' ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F'''{name:80}: {module}''' )
def finish_calibration(model , args ) -> None:
'''simple docstring'''
logger.info('Loading calibrated amax' )
for name, module in model.named_modules():
if name.endswith('_quantizer' ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax('percentile' , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
    print_quant_summary(model )
def fuse_qkv(model , args ) -> None:
    '''simple docstring'''
    def fusea(qq , qk , qv ):
        for mod in [qq, qk, qv]:
            if not hasattr(mod , '_amax' ):
                print(' WARNING: NO AMAX BUFFER' )
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()
        amax = max(q , k , v )
        qq._amax.fill_(amax )
        qk._amax.fill_(amax )
        qv._amax.fill_(amax )
logger.info(F''' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' )
for name, mod in model.named_modules():
if name.endswith('.attention.self' ):
logger.info(F'''FUSE_QKV: {name:{name_width}}''' )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def clip_gelu(model , maxval ) -> None:
    '''simple docstring'''
    for name, mod in model.named_modules():
        if name.endswith('.output.dense' ) and not name.endswith('attention.output.dense' ):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval )
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(F'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' )
def expand_amax(model ) -> None:
    '''simple docstring'''
    for name, mod in model.named_modules():
        if hasattr(mod , '_weight_quantizer' ) and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k , dtype=amax.dtype , device=amax.device ) * amax
            print(F'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' )
def recalibrate_weights(model ) -> None:
    '''simple docstring'''
    for name, mod in model.named_modules():
        if hasattr(mod , '_weight_quantizer' ):
            if not hasattr(mod._weight_quantizer , '_amax' ):
                print(F'''RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER''' )
                continue
            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
            reduce_axis = set(range(len(mod.weight.size() ) ) ) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight , axis=reduce_axis , keepdims=True ).detach()
            logger.info(F'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' )
            mod._weight_quantizer._amax = amax
def print_model_summary(model , name_width=25 , line_width=1_80 , ignore=None ) -> None:
    '''simple docstring'''
    if ignore is None:
        ignore = []
    elif not isinstance(ignore , list ):
        ignore = [ignore]
    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod , 'weight' ):
            continue
        name_width = max(name_width , len(name ) )
    for name, mod in model.named_modules():
        input_q = getattr(mod , '_input_quantizer' , None )
        weight_q = getattr(mod , '_weight_quantizer' , None )
        if not hasattr(mod , 'weight' ):
            continue
        if type(mod ) in ignore:
            continue
        if [True for s in ignore if type(s ) is str and s in name]:
            continue
        act_str = F'''Act:{input_q.extra_repr()}'''
        wgt_str = F'''Wgt:{weight_q.extra_repr()}'''
        s = F'''{name:{name_width}} {act_str} {wgt_str}'''
        if len(s ) <= line_width:
            logger.info(s )
        else:
            logger.info(F'''{name:{name_width}} {act_str}''' )
            logger.info(F'''{" ":{name_width}} {wgt_str}''' )
def print_quant_summary(model ) -> None:
    '''simple docstring'''
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod , pytorch_quantization.nn.TensorQuantizer ):
            print(F'''{name:80} {mod}''' )
            count += 1
    print(F'''{count} TensorQuantizers found in model''' )
def set_quantizer(name , mod , quantizer , k , v ) -> None:
    '''simple docstring'''
    quantizer_mod = getattr(mod , quantizer , None )
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod , k )
        setattr(quantizer_mod , k , v )
    else:
        logger.warning(F'''{name} has no {quantizer}''' )
def set_quantizers(name , mod , which="both" , **kwargs ) -> None:
    '''simple docstring'''
    s = F'''Warning: changing {which} quantizers of {name:{qname_width}}'''
    for k, v in kwargs.items():
        s += F''' {k}={v}'''
        if which in ["input", "both"]:
            set_quantizer(name , mod , '_input_quantizer' , k , v )
        if which in ["weight", "both"]:
            set_quantizer(name , mod , '_weight_quantizer' , k , v )
    logger.info(s )
def set_quantizer_by_name(model , names , **kwargs ) -> None:
    '''simple docstring'''
    for name, mod in model.named_modules():
        if hasattr(mod , '_input_quantizer' ) or hasattr(mod , '_weight_quantizer' ):
            for n in names:
                if re.search(n , name ):
                    set_quantizers(name , mod , **kwargs )
        elif name.endswith('_quantizer' ):
            for n in names:
                if re.search(n , name ):
                    s = F'''Warning: changing {name:{name_width}}'''
                    for k, v in kwargs.items():
                        s += F''' {k}={v}'''
                        setattr(mod , k , v )
                    logger.info(s )
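# A rough calibration workflow sketch using the helpers above (the model and the
# calibration batches are assumptions supplied by the caller, e.g. run_quant_qa.py):
#
#   parser = argparse.ArgumentParser()
#   add_arguments(parser)
#   args = parser.parse_args(['--aprec', '8', '--wprec', '8', '--calibrator', 'max'])
#   set_default_quantizers(args)              # must run before the model is built
#   configure_model(model, args, calib=True)
#   enable_calibration(model)
#   # ...run a few forward passes on calibration data...
#   finish_calibration(model, args)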
| 315 | 0 |
"""simple docstring"""
from math import sqrt
def is_prime(number: int ):
    '''simple docstring'''
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2 , int(round(sqrt(number ) ) ) + 1 ):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status , bool ), "'status' must been from type bool"
    return status
def a__ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
lowerCAmelCase : Tuple = list(range(2 , n + 1 ) )
lowerCAmelCase : List[Any] = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(SCREAMING_SNAKE_CASE ) ):
for j in range(i + 1 , len(SCREAMING_SNAKE_CASE ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
lowerCAmelCase : Tuple = 0
# filters actual prime numbers.
lowerCAmelCase : str = [x for x in begin_list if x != 0]
# precondition
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ), "'ans' must been from type list"
return ans
def a__ ( SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and (n > 2), "'N' must been an int and > 2"
lowerCAmelCase : str = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(SCREAMING_SNAKE_CASE ):
ans.append(SCREAMING_SNAKE_CASE )
# precondition
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ), "'ans' must been from type list"
return ans
def prime_factorization(number ):
    '''simple docstring'''
    assert isinstance(number , int ) and number >= 0, "'number' must been an int and >= 0"
    ans = [] # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number )
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number ):
        while quotient != 1:
            if is_prime(factor ) and (quotient % factor == 0):
                ans.append(factor )
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number )
    # precondition
    assert isinstance(ans , list ), "'ans' must been from type list"
    return ans
def greatest_prime_factor(number ):
    '''simple docstring'''
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number )
    ans = max(prime_factors )
    # precondition
    assert isinstance(ans , int ), "'ans' must been from type int"
    return ans
def smallest_prime_factor(number ):
    '''simple docstring'''
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number )
    ans = min(prime_factors )
    # precondition
    assert isinstance(ans , int ), "'ans' must been from type int"
    return ans
def is_even(number ):
    '''simple docstring'''
    assert isinstance(number , int ), "'number' must been an int"
    assert isinstance(number % 2 == 0 , bool ), "compare must been from type bool"
    return number % 2 == 0
def is_odd(number ):
    '''simple docstring'''
    assert isinstance(number , int ), "'number' must been an int"
    assert isinstance(number % 2 != 0 , bool ), "compare must been from type bool"
    return number % 2 != 0
def a__ ( SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
assert (
isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and (number > 2) and is_even(SCREAMING_SNAKE_CASE )
), "'number' must been an int, even and > 2"
lowerCAmelCase : str = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
lowerCAmelCase : Any = get_prime_numbers(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[int] = len(SCREAMING_SNAKE_CASE )
# run variable for while-loops.
lowerCAmelCase : Dict = 0
lowerCAmelCase : Optional[int] = None
# exit variable. for break up the loops
lowerCAmelCase : Optional[int] = True
while i < len_pn and loop:
lowerCAmelCase : int = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
lowerCAmelCase : Dict = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
and (len(SCREAMING_SNAKE_CASE ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def gcd(number_a , number_b ):
    '''simple docstring'''
    assert (
        isinstance(number_a , int )
        and isinstance(number_b , int )
        and (number_a >= 0)
        and (number_b >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while number_b != 0:
        rest = number_a % number_b
        number_a = number_b
        number_b = rest
    # precondition
    assert isinstance(number_a , int ) and (
        number_a >= 0
    ), "'number' must been from type int and positive"
    return number_a
def a__ ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
assert (
isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
lowerCAmelCase : List[str] = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
lowerCAmelCase : Dict = prime_factorization(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[int] = prime_factorization(SCREAMING_SNAKE_CASE )
elif numbera == 1 or numbera == 1:
lowerCAmelCase : Any = []
lowerCAmelCase : Tuple = []
lowerCAmelCase : List[str] = max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[int] = 0
lowerCAmelCase : Optional[Any] = 0
lowerCAmelCase : Union[str, Any] = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
lowerCAmelCase : Any = prime_fac_a.count(SCREAMING_SNAKE_CASE )
lowerCAmelCase : int = prime_fac_a.count(SCREAMING_SNAKE_CASE )
for _ in range(max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ):
ans *= n
else:
lowerCAmelCase : List[str] = prime_fac_a.count(SCREAMING_SNAKE_CASE )
for _ in range(SCREAMING_SNAKE_CASE ):
ans *= n
done.append(SCREAMING_SNAKE_CASE )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
lowerCAmelCase : Any = prime_fac_a.count(SCREAMING_SNAKE_CASE )
for _ in range(SCREAMING_SNAKE_CASE ):
ans *= n
done.append(SCREAMING_SNAKE_CASE )
# precondition
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def a__ ( SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and (n >= 0), "'number' must been a positive int"
lowerCAmelCase : int = 0
lowerCAmelCase : Tuple = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(SCREAMING_SNAKE_CASE ):
ans += 1
# precondition
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and is_prime(
SCREAMING_SNAKE_CASE ), "'ans' must been a prime number and from type int"
return ans
def a__ ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
assert (
is_prime(SCREAMING_SNAKE_CASE ) and is_prime(SCREAMING_SNAKE_CASE ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
lowerCAmelCase : List[Any] = p_number_a + 1 # jump to the next number
lowerCAmelCase : Tuple = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(SCREAMING_SNAKE_CASE ):
number += 1
while number < p_number_a:
ans.append(SCREAMING_SNAKE_CASE )
number += 1
# fetch the next prime number.
while not is_prime(SCREAMING_SNAKE_CASE ):
number += 1
# precondition
assert (
isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
and ans[0] != p_number_a
and ans[len(SCREAMING_SNAKE_CASE ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def a__ ( SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and (n >= 1), "'n' must been int and >= 1"
lowerCAmelCase : Any = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(SCREAMING_SNAKE_CASE )
# precondition
assert ans[0] == 1 and ans[len(SCREAMING_SNAKE_CASE ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def a__ ( SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and (
number > 1
), "'number' must been an int and >= 1"
lowerCAmelCase : Any = get_divisors(SCREAMING_SNAKE_CASE )
# precondition
assert (
isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
and (divisors[0] == 1)
and (divisors[len(SCREAMING_SNAKE_CASE ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def a__ ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
assert (
isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
lowerCAmelCase : Any = gcd(abs(SCREAMING_SNAKE_CASE ) , abs(SCREAMING_SNAKE_CASE ) )
# precondition
assert (
isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial(n ):
    '''simple docstring'''
    assert isinstance(n , int ) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1 # this will be return.
    for factor in range(1 , n + 1 ):
        ans *= factor
    return ans
def fib(n ):
    '''simple docstring'''
    assert isinstance(n , int ) and (n >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fiba = 1
    ans = 1 # this will be return
    for _ in range(n - 1 ):
        tmp = ans
        ans += fiba
        fiba = tmp
    return ans
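# Quick self-check of the helpers above (a minimal sketch; only the functions that were
# restored to their upstream names are exercised):
#
#   is_prime(97)              -> True
#   prime_factorization(360)  -> [2, 2, 2, 3, 3, 5]
#   gcd(24, 36)               -> 12
#   fib(10)                   -> 89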
| 108 |
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
_CITATION = r'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase_ ( datasets.Metric ):
'''simple docstring'''
def lowerCAmelCase_ ( self : Optional[int] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , )
def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int]=False ):
_A = spearmanr(_UpperCAmelCase , _UpperCAmelCase )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 315 | 0 |
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def _snake_case ( UpperCamelCase : Namespace ):
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
IMPORT_ERROR_MESSAGE = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"
class SCREAMING_SNAKE_CASE__ ( BaseTransformersCLICommand ):
@staticmethod
def SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
        train_parser = parser.add_parser(
            """convert""" , help="""CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.""" , )
train_parser.add_argument("""--model_type""" , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="""Model's type.""" )
train_parser.add_argument(
"""--tf_checkpoint""" , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="""TensorFlow checkpoint path or folder.""" )
train_parser.add_argument(
"""--pytorch_dump_output""" , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="""Path to the PyTorch saved model output.""" )
train_parser.add_argument("""--config""" , type=_SCREAMING_SNAKE_CASE , default="""""" , help="""Configuration file path or folder.""" )
train_parser.add_argument(
"""--finetuning_task_name""" , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , help="""Optional fine-tuning task name if the TF model was a finetuned model.""" , )
train_parser.set_defaults(func=_SCREAMING_SNAKE_CASE )
    def __init__( self , model_type , tf_checkpoint , pytorch_dump_output , config , finetuning_task_name , *args , ) -> None:
        '''simple docstring'''
        self._logger = logging.get_logger("""transformers-cli/converting""" )
        self._logger.info(F"Loading model {model_type}" )
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
if "ckpt" in self._tf_checkpoint.lower():
UpperCAmelCase : List[Any] = self._tf_checkpoint
UpperCAmelCase : List[str] = """"""
else:
UpperCAmelCase : str = self._tf_checkpoint
UpperCAmelCase : Optional[int] = """"""
convert_transfo_xl_checkpoint_to_pytorch(
_SCREAMING_SNAKE_CASE , self._config , self._pytorch_dump_output , _SCREAMING_SNAKE_CASE )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
"""--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]""" )
| 109 |
"""simple docstring"""
from collections.abc import Callable
def bisection(function : Callable[[float], float] , a : float , b : float ) -> float:
    '''simple docstring'''
    start = a
    end = b
    if function(a ) == 0: # one of the a or b is a root for the function
        return a
    elif function(b ) == 0:
        return b
    elif (
        function(a ) * function(b ) > 0
    ): # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError('could not find root in given interval.' )
    else:
        mid = start + (end - start) / 2.0
        while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7
            if function(mid ) == 0:
                return mid
            elif function(mid ) * function(start ) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid
def f(x : float ) -> float:
    '''simple docstring'''
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1_000))
import doctest
doctest.testmod()
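# The printed root of x**3 - 2*x - 5 on [1, 1000] is approximately 2.0945515.
# Another quick check (a minimal sketch): the positive root of x**2 - 2 on [0, 2].
#   bisection(lambda x: x**2 - 2, 0, 2)  # ~1.4142135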
| 315 | 0 |
'''simple docstring'''
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class lowerCAmelCase ( SchedulerMixin , ConfigMixin ):
lowerCAmelCase_ = 1
@register_to_config
    def __init__( self , num_train_timesteps=2000 , beta_min=0.1 , beta_max=20 , sampling_eps=1E-3 ):
        """simple docstring"""
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None
    def set_timesteps( self , num_inference_steps , device: Union[str, torch.device] = None ):
        """simple docstring"""
        self.timesteps = torch.linspace(1 , self.config.sampling_eps , num_inference_steps , device=device )
    def step_pred( self , score , x , t , generator=None ):
        """simple docstring"""
        if self.timesteps is None:
            raise ValueError(
                '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.2_5 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        std = std.flatten()
        while len(std.shape ) < len(score.shape ):
            std = std.unsqueeze(-1 )
        score = -score / std
        # compute
        dt = -1.0 / len(self.timesteps )
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            beta_t = beta_t.unsqueeze(-1 )
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t )
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt
        # add noise
        noise = randn_tensor(x.shape , layout=x.layout , generator=generator , device=x.device , dtype=x.dtype )
        x = x_mean + diffusion * math.sqrt(-dt ) * noise
        return x, x_mean
def __len__( self : Optional[int] ):
"""simple docstring"""
return self.config.num_train_timesteps
| 141 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ) , '''Tatoeba directory does not exist.''' )
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase_ ( self : Optional[Any] ):
_A = tempfile.mkdtemp()
return TatoebaConverter(save_dir=_UpperCAmelCase )
@slow
def lowerCAmelCase_ ( self : Optional[int] ):
self.resolver.convert_models(['heb-eng'] )
@slow
def lowerCAmelCase_ ( self : Optional[Any] ):
_A , _A = self.resolver.write_model_card('opus-mt-he-en' , dry_run=_UpperCAmelCase )
assert mmeta["long_pair"] == "heb-eng"
| 315 | 0 |
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _A ( PipelineTool ):
_UpperCamelCase : Dict = (
'''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'''
'''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
'''describing the elements what should be identified in the segmentation mask. The tool returns the mask.'''
)
_UpperCamelCase : Any = '''CIDAS/clipseg-rd64-refined'''
_UpperCamelCase : Union[str, Any] = '''image_segmenter'''
_UpperCamelCase : List[Any] = CLIPSegForImageSegmentation
_UpperCamelCase : str = ['''image''', '''text''']
_UpperCamelCase : Optional[int] = ['''image''']
def __init__( self : List[Any] , *_A : List[Any] , **_A : Tuple ) -> str:
"""simple docstring"""
requires_backends(self , ['''vision'''] )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
def __a ( self : Tuple , _A : "Image" , _A : str ) -> Optional[int]:
"""simple docstring"""
return self.pre_processor(text=[label] , images=[image] , padding=_UpperCAmelCase , return_tensors='''pt''' )
def __a ( self : str , _A : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
with torch.no_grad():
lowercase : Dict = self.model(**_UpperCAmelCase ).logits
return logits
def __a ( self : Union[str, Any] , _A : Tuple ) -> Any:
"""simple docstring"""
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8 ) )
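# A hedged usage sketch (the class name above follows this file's mangled naming; upstream
# calls it ImageSegmentationTool, and the first call downloads the CLIPSeg checkpoint):
#
#   from PIL import Image
#   tool = _A()
#   mask = tool(image=Image.open('cat.png'), label='cat')
#   mask.save('cat_mask.png')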
| 308 |
"""simple docstring"""
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 315 | 0 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
UpperCAmelCase : List[str] = {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class __lowercase ( PretrainedConfig ):
"""simple docstring"""
UpperCamelCase : int = '''xlnet'''
UpperCamelCase : List[Any] = ['''mems''']
UpperCamelCase : Any = {
'''n_token''': '''vocab_size''', # Backward compatibility
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
    def __init__( self , vocab_size=3_20_00 , d_model=10_24 , n_layer=24 , n_head=16 , d_inner=40_96 , ff_activation="gelu" , untie_r=True , attn_type="bi" , initializer_range=0.02 , layer_norm_eps=1e-1_2 , dropout=0.1 , mem_len=5_12 , reuse_len=None , use_mems_eval=True , use_mems_train=False , bi_data=False , clamp_len=-1 , same_length=False , summary_type="last" , summary_use_proj=True , summary_activation="tanh" , summary_last_dropout=0.1 , start_n_top=5 , end_n_top=5 , pad_token_id=5 , bos_token_id=1 , eos_token_id=2 , **kwargs , ) -> None:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(F'\'d_model % n_head\' ({d_model % n_head}) should be equal to 0' )
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    F'`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})' )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        if "use_cache" in kwargs:
            warnings.warn(
                """The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"""
                """ instead.""" , FutureWarning , )
            use_mems_eval = kwargs["""use_cache"""]
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
    @property
    def max_position_embeddings( self ) -> int:
        '''simple docstring'''
        logger.info(F'The model {self.model_type} is one of the few models that has no sequence length limit.' )
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings( self , value ) -> None:
        '''simple docstring'''
        raise NotImplementedError(
            F'The model {self.model_type} is one of the few models that has no sequence length limit.' )
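# A minimal construction sketch (values are illustrative; the class name above follows
# this file's mangled naming, upstream calls it XLNetConfig):
#
#   config = __lowercase(vocab_size=32000, d_model=1024, n_layer=24, n_head=16)
#   config.d_head  # 64, derived in __init__ when `d_head` is not passed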
| 252 |
"""simple docstring"""
from __future__ import annotations
def get_valid_pos(position : tuple[int, int] , n : int ) -> list[tuple[int, int]]:
    '''simple docstring'''
    y , x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for position in positions:
        y_test , x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position )
    return permissible_positions
def is_complete(board : list[list[int]] ) -> bool:
    '''simple docstring'''
    return not any(elem == 0 for row in board for elem in row )
def open_knight_tour_helper(board : list[list[int]] , pos : tuple[int, int] , curr : int ) -> bool:
    '''simple docstring'''
    if is_complete(board ):
        return True
    for position in get_valid_pos(pos , len(board ) ):
        y , x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board , position , curr + 1 ):
                return True
            board[y][x] = 0
    return False
def open_knight_tour(n : int ) -> list[list[int]]:
    '''simple docstring'''
    board = [[0 for i in range(n )] for j in range(n )]
    for i in range(n ):
        for j in range(n ):
            board[i][j] = 1
            if open_knight_tour_helper(board , (i, j) , 1 ):
                return board
            board[i][j] = 0
    msg = F'''Open Knight Tour cannot be performed on a board of size {n}'''
    raise ValueError(msg )
if __name__ == "__main__":
import doctest
doctest.testmod()
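# Quick demo (may take a moment; the solver backtracks over all starting squares):
#   board = open_knight_tour(5)
#   for row in board:
#       print(row)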
| 315 | 0 |
def encrypt(input_string: str , key: int ) -> str:
    """simple docstring"""
    temp_grid: list[list[str]] = [[] for _ in range(key )]
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative" )
    if key == 1 or len(input_string ) <= key:
        return input_string
    for position, character in enumerate(input_string ):
        num = position % (lowest * 2) # puts it in bounds
        num = min(num , lowest * 2 - num ) # creates zigzag pattern
        temp_grid[num].append(character )
    grid = ["".join(row ) for row in temp_grid]
    output_string = "".join(grid )
    return output_string
def decrypt(input_string: str , key: int ) -> str:
    """simple docstring"""
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative" )
    if key == 1:
        return input_string
    temp_grid: list[list[str]] = [[] for _ in range(key )] # generates template
    for position in range(len(input_string ) ):
        num = position % (lowest * 2) # puts it in bounds
        num = min(num , lowest * 2 - num ) # creates zigzag pattern
        temp_grid[num].append("*" )
    counter = 0
    for row in temp_grid: # fills in the characters
        splice = input_string[counter : counter + len(row )]
        grid.append(list(splice ) )
        counter += len(splice )
    output_string = "" # reads as zigzag
    for position in range(len(input_string ) ):
        num = position % (lowest * 2) # puts it in bounds
        num = min(num , lowest * 2 - num ) # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0 )
    return output_string
def bruteforce(input_string: str ) -> dict[int, str]:
    """simple docstring"""
    results = {}
    for key_guess in range(1 , len(input_string ) ): # tries every key
        results[key_guess] = decrypt(input_string , key_guess )
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
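# Worked example with the functions above (key = number of rails):
#   encrypt("HELLO", 3)      -> "HOELL"   (rails: "HO" / "EL" / "L")
#   decrypt("HOELL", 3)      -> "HELLO"
#   bruteforce("HOELL")[3]   -> "HELLO"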
| 10 |
"""simple docstring"""
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowercase_ ( Trainer ):
'''simple docstring'''
def __init__( self : int , *_UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : str=None , **_UpperCAmelCase : List[Any] ):
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
_A = eval_examples
_A = post_process_function
def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : str=None , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : str = "eval" ):
_A = self.eval_dataset if eval_dataset is None else eval_dataset
_A = self.get_eval_dataloader(_UpperCAmelCase )
_A = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
_A = self.compute_metrics
_A = None
_A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
_A = time.time()
try:
_A = eval_loop(
_UpperCAmelCase , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_UpperCAmelCase , metric_key_prefix=_UpperCAmelCase , )
finally:
_A = compute_metrics
_A = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
_UpperCAmelCase , _UpperCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
_A = self.post_process_function(_UpperCAmelCase , _UpperCAmelCase , output.predictions )
_A = self.compute_metrics(_UpperCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
_A = metrics.pop(_UpperCAmelCase )
metrics.update(output.metrics )
else:
_A = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(_UpperCAmelCase )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
_A = self.callback_handler.on_evaluate(self.args , self.state , self.control , _UpperCAmelCase )
return metrics
def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : str = "test" ):
_A = self.get_test_dataloader(_UpperCAmelCase )
# Temporarily disable metric computation, we will do it in the loop here.
_A = self.compute_metrics
_A = None
_A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
_A = time.time()
try:
_A = eval_loop(
_UpperCAmelCase , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_UpperCAmelCase , metric_key_prefix=_UpperCAmelCase , )
finally:
_A = compute_metrics
_A = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
_UpperCAmelCase , _UpperCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
_A = self.post_process_function(_UpperCAmelCase , _UpperCAmelCase , output.predictions , 'predict' )
_A = self.compute_metrics(_UpperCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
_A = metrics.pop(_UpperCAmelCase )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_UpperCAmelCase )
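# A hedged instantiation sketch (upstream calls this class QuestionAnsweringTrainer; the
# model, datasets and post-processing callables are assumptions supplied by the caller):
#
#   trainer = lowercase_(
#       model=model,
#       args=training_args,
#       train_dataset=train_dataset,
#       eval_dataset=eval_dataset,
#       eval_examples=eval_examples,
#       post_process_function=post_processing_function,
#       compute_metrics=compute_metrics,
#   )
#   metrics = trainer.evaluate()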
| 315 | 0 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
__UpperCamelCase : str = {
"config": [
"EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
"OnnxConfig",
"OnnxConfigWithPast",
"OnnxSeq2SeqConfigWithPast",
"PatchingSpec",
],
"convert": ["export", "validate_model_outputs"],
"features": ["FeaturesManager"],
"utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
__UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
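# The lazy module keeps `import transformers.onnx` cheap; attributes resolve on first use, e.g.:
#
#   from transformers.onnx import FeaturesManager
#   model_kind, onnx_config_cls = FeaturesManager.check_supported_model_or_raise(
#       model, feature="sequence-classification")  # `model` is an assumed loaded PreTrainedModel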
| 146 |
"""simple docstring"""
def different_signs(num_a : int , num_b : int ) -> bool:
    '''simple docstring'''
    return num_a ^ num_b < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
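# Sign-bit trick: the XOR of two integers is negative exactly when their sign bits differ.
#   different_signs(1, -1)  -> True
#   different_signs(1, 1)   -> False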
| 315 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
'''umberto-commoncrawl-cased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
),
'''umberto-wikipedia-uncased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE__ ( PretrainedConfig ):
"""simple docstring"""
a : Optional[int] ='''camembert'''
    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class SCREAMING_SNAKE_CASE__ ( OnnxConfig ):
"""simple docstring"""
@property
def lowercase__ ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
lowerCAmelCase : List[str] = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowerCAmelCase : Union[str, Any] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 108 |
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : int ):
_A = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
_A = Vector()
def lowerCAmelCase_ ( self : Optional[Any] ):
_A = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(_UpperCAmelCase ) , '(0,0,0,0,0,1)' )
def lowerCAmelCase_ ( self : Optional[int] ):
_A = Vector([1, 2, 3, 4] )
self.assertEqual(len(_UpperCAmelCase ) , 4 )
def lowerCAmelCase_ ( self : int ):
_A = Vector([1, 2] )
_A = Vector([1, 2, 3, 4, 5] )
_A = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
_A = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 )
def lowerCAmelCase_ ( self : str ):
_A = Vector([1, 2, 3] )
_A = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def lowerCAmelCase_ ( self : Optional[Any] ):
_A = Vector([1, 2, 3] )
_A = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def lowerCAmelCase_ ( self : int ):
_A = Vector([1, 2, 3] )
_A = Vector([2, -1, 4] ) # for test of dot product
_A = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , '(3.0,6.0,9.0)' )
self.assertEqual((a * b) , 0 )
def lowerCAmelCase_ ( self : Dict ):
self.assertEqual(str(zero_vector(10 ) ).count('0' ) , 10 )
def lowerCAmelCase_ ( self : Tuple ):
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '(0,1,0)' )
def lowerCAmelCase_ ( self : Union[str, Any] ):
_A = Vector([1, 2, 3] )
_A = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , _UpperCAmelCase , _UpperCAmelCase ) ) , '(3,4,7)' )
def lowerCAmelCase_ ( self : Union[str, Any] ):
_A = Vector([1, 0, 0, 0, 0, 0] )
_A = x.copy()
self.assertEqual(str(_UpperCAmelCase ) , str(_UpperCAmelCase ) )
def lowerCAmelCase_ ( self : Optional[Any] ):
_A = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(_UpperCAmelCase ) , '(0,1,0)' )
def lowerCAmelCase_ ( self : Any ):
_A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual('|1,2,3|\n|2,4,5|\n|6,7,8|\n' , str(_UpperCAmelCase ) )
def lowerCAmelCase_ ( self : Any ):
_A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_A = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(_UpperCAmelCase , _UpperCAmelCase ) )
def lowerCAmelCase_ ( self : Any ):
_A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_A = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(_UpperCAmelCase , _UpperCAmelCase ) )
def lowerCAmelCase_ ( self : str ):
_A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def lowerCAmelCase_ ( self : Tuple ):
_A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
_A = Vector([1, 2, 3] )
self.assertEqual('(14,32,50)' , str(a * x ) )
self.assertEqual('|2,4,6|\n|8,10,12|\n|14,16,18|\n' , str(a * 2 ) )
def lowerCAmelCase_ ( self : Any ):
_A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual('|1,2,5|\n|2,4,5|\n|6,7,8|\n' , str(_UpperCAmelCase ) )
def lowerCAmelCase_ ( self : List[Any] ):
_A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(7 , a.component(2 , 1 ) , 0.01 )
def lowerCAmelCase_ ( self : Tuple ):
_A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_A = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual('|2,4,10|\n|4,8,10|\n|12,14,18|\n' , str(a + b ) )
def lowerCAmelCase_ ( self : Optional[Any] ):
_A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_A = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual('|0,0,-4|\n|0,0,0|\n|0,0,-2|\n' , str(a - b ) )
def lowerCAmelCase_ ( self : int ):
self.assertEqual(
'|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n' , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
| 315 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class snake_case__(__lowerCAmelCase ):
"""simple docstring"""
lowercase_ = '''perceiver'''
def __init__( self : Tuple , SCREAMING_SNAKE_CASE : Optional[int]=256 , SCREAMING_SNAKE_CASE : List[Any]=1_280 , SCREAMING_SNAKE_CASE : int=768 , SCREAMING_SNAKE_CASE : Tuple=1 , SCREAMING_SNAKE_CASE : List[Any]=26 , SCREAMING_SNAKE_CASE : Union[str, Any]=8 , SCREAMING_SNAKE_CASE : Union[str, Any]=8 , SCREAMING_SNAKE_CASE : Optional[int]=None , SCREAMING_SNAKE_CASE : Union[str, Any]=None , SCREAMING_SNAKE_CASE : List[Any]="kv" , SCREAMING_SNAKE_CASE : Union[str, Any]=1 , SCREAMING_SNAKE_CASE : Any=1 , SCREAMING_SNAKE_CASE : List[str]="gelu" , SCREAMING_SNAKE_CASE : List[str]=0.1 , SCREAMING_SNAKE_CASE : Dict=0.02 , SCREAMING_SNAKE_CASE : List[str]=1E-1_2 , SCREAMING_SNAKE_CASE : List[str]=True , SCREAMING_SNAKE_CASE : Optional[Any]=262 , SCREAMING_SNAKE_CASE : Tuple=2_048 , SCREAMING_SNAKE_CASE : int=56 , SCREAMING_SNAKE_CASE : Tuple=[368, 496] , SCREAMING_SNAKE_CASE : Tuple=16 , SCREAMING_SNAKE_CASE : Dict=1_920 , SCREAMING_SNAKE_CASE : Union[str, Any]=16 , SCREAMING_SNAKE_CASE : Any=[1, 16, 224, 224] , **SCREAMING_SNAKE_CASE : Dict , ):
super().__init__(**_UpperCAmelCase )
lowercase__ : Union[str, Any] = num_latents
lowercase__ : str = d_latents
lowercase__ : List[str] = d_model
lowercase__ : Optional[int] = num_blocks
lowercase__ : Union[str, Any] = num_self_attends_per_block
lowercase__ : List[str] = num_self_attention_heads
lowercase__ : Optional[Any] = num_cross_attention_heads
lowercase__ : List[str] = qk_channels
lowercase__ : Union[str, Any] = v_channels
lowercase__ : Optional[int] = cross_attention_shape_for_attention
lowercase__ : Union[str, Any] = self_attention_widening_factor
lowercase__ : Any = cross_attention_widening_factor
lowercase__ : Dict = hidden_act
lowercase__ : List[Any] = attention_probs_dropout_prob
lowercase__ : Optional[int] = initializer_range
lowercase__ : List[str] = layer_norm_eps
lowercase__ : int = use_query_residual
# masked language modeling attributes
lowercase__ : Optional[int] = vocab_size
lowercase__ : Optional[Any] = max_position_embeddings
# image classification attributes
lowercase__ : Optional[int] = image_size
# flow attributes
lowercase__ : List[Any] = train_size
# multimodal autoencoding attributes
lowercase__ : List[str] = num_frames
lowercase__ : Dict = audio_samples_per_frame
lowercase__ : Tuple = samples_per_patch
lowercase__ : str = output_shape
class snake_case__(__lowerCAmelCase ):
"""simple docstring"""
@property
def snake_case ( self : Any ):
if self.task == "multiple-choice":
lowercase__ : Optional[Any] = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowercase__ : Tuple = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("inputs", dynamic_axis),
("attention_mask", dynamic_axis),
] )
@property
def snake_case ( self : List[str] ):
return 1E-4
def snake_case ( self : int , SCREAMING_SNAKE_CASE : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , SCREAMING_SNAKE_CASE : int = -1 , SCREAMING_SNAKE_CASE : int = -1 , SCREAMING_SNAKE_CASE : int = -1 , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : Optional[TensorType] = None , SCREAMING_SNAKE_CASE : int = 3 , SCREAMING_SNAKE_CASE : int = 40 , SCREAMING_SNAKE_CASE : int = 40 , ):
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase__ : Union[str, Any] = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowercase__ : Union[str, Any] = preprocessor.num_special_tokens_to_add(_UpperCAmelCase )
lowercase__ : str = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_UpperCAmelCase )
# Generate dummy inputs according to compute batch and sequence
lowercase__ : int = [" ".join(["a"] ) * seq_length] * batch_size
lowercase__ : Any = dict(preprocessor(_UpperCAmelCase , return_tensors=_UpperCAmelCase ) )
lowercase__ : List[str] = inputs.pop("input_ids" )
return inputs
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase__ : int = compute_effective_axis_dimension(_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch )
lowercase__ : List[Any] = self._generate_dummy_images(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
lowercase__ : int = dict(preprocessor(images=_UpperCAmelCase , return_tensors=_UpperCAmelCase ) )
lowercase__ : str = inputs.pop("pixel_values" )
return inputs
else:
raise ValueError(
"Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
| 130 |
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''',
}
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : int = '''xlnet'''
UpperCAmelCase : List[Any] = ['''mems''']
UpperCAmelCase : Any = {
'''n_token''': '''vocab_size''', # Backward compatibility
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : Union[str, Any] , _UpperCAmelCase : Dict=32_000 , _UpperCAmelCase : List[str]=1_024 , _UpperCAmelCase : Any=24 , _UpperCAmelCase : Union[str, Any]=16 , _UpperCAmelCase : Union[str, Any]=4_096 , _UpperCAmelCase : Tuple="gelu" , _UpperCAmelCase : Any=True , _UpperCAmelCase : str="bi" , _UpperCAmelCase : int=0.02 , _UpperCAmelCase : Optional[Any]=1E-1_2 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Any=512 , _UpperCAmelCase : Dict=None , _UpperCAmelCase : int=True , _UpperCAmelCase : int=False , _UpperCAmelCase : Optional[Any]=False , _UpperCAmelCase : int=-1 , _UpperCAmelCase : Optional[int]=False , _UpperCAmelCase : Union[str, Any]="last" , _UpperCAmelCase : int=True , _UpperCAmelCase : str="tanh" , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Dict=5 , _UpperCAmelCase : Optional[Any]=5 , _UpperCAmelCase : Union[str, Any]=5 , _UpperCAmelCase : List[str]=1 , _UpperCAmelCase : Dict=2 , **_UpperCAmelCase : int , ):
_A = vocab_size
_A = d_model
_A = n_layer
_A = n_head
if d_model % n_head != 0:
raise ValueError(F'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
F'''`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})''' )
_A = d_model // n_head
_A = ff_activation
_A = d_inner
_A = untie_r
_A = attn_type
_A = initializer_range
_A = layer_norm_eps
_A = dropout
_A = mem_len
_A = reuse_len
_A = bi_data
_A = clamp_len
_A = same_length
_A = summary_type
_A = summary_use_proj
_A = summary_activation
_A = summary_last_dropout
_A = start_n_top
_A = end_n_top
_A = bos_token_id
_A = pad_token_id
_A = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'
' instead.' , _UpperCAmelCase , )
_A = kwargs['use_cache']
_A = use_mems_eval
_A = use_mems_train
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
    @property
    def max_position_embeddings( self : Tuple ):
        logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings( self : Optional[Any] , value : Optional[Any] ):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
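# Behaviour sketch (illustrative, not part of the original file): the
# constructor enforces d_model % n_head == 0 and, when a `d_head` kwarg is
# passed, that it equals d_model // n_head (e.g. 1_024 // 16 = 64); violating
# either raises ValueError.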
| 315 | 0 |
import math
def main() -> None:
    message = input("Enter message: " )
    key = int(input(F"""Enter key [2-{len(message ) - 1}]: """ ) )
    mode = input("Encryption/Decryption [e/d]: " )
    if mode.lower().startswith("e" ):
        text = encrypt_message(key , message )
    elif mode.lower().startswith("d" ):
        text = decrypt_message(key , message )
    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(F"""Output:\n{text + "|"}""" )
def encrypt_message(key: int , message: str ) -> str:
    # Read the message column by column: column `col` collects every
    # `key`-th character starting at offset `col`.
    cipher_text = [""] * key
    for col in range(key ):
        pointer = col
        while pointer < len(message ):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text )
def decrypt_message(key: int , message: str ) -> str:
    # Rebuild the grid row by row, skipping the "shaded" cells that were
    # never filled during encryption.
    num_cols = math.ceil(len(message ) / key )
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message )
    plain_text = [""] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1
    return "".join(plain_text )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
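    # Round-trip sketch (illustrative, not in the original file): columnar
    # transposition is invertible, so decryption with the same key recovers
    # the plaintext exactly.
    assert decrypt_message(6 , encrypt_message(6 , "Harshil Darji" ) ) == "Harshil Darji"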
| 52 |
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
a = {
'''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'''bert''': (BertConfig, BertForMaskedLM, BertTokenizer),
'''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks( args ) -> None:
    '''Abort early on incompatible combinations of CLI flags.'''
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings( student , args ) -> None:
    '''Freeze the positional embeddings of the student model.'''
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings( student , args ) -> None:
    '''Freeze the token type embeddings of the student model, if present.'''
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main() -> None:
    '''Parse CLI arguments, load teacher and student, and run the distillation.'''
    parser = argparse.ArgumentParser(description='Training' )
    parser.add_argument('--force' , action='store_true' , help='Overwrite dump_path if it already exists.' )
    parser.add_argument(
        '--dump_path' , type=str , required=True , help='The output directory (log, checkpoints, parameters, etc.)' )
    parser.add_argument(
        '--data_file' , type=str , required=True , help='The binarized file (tokenized + tokens_to_ids) and grouped by sequence.' , )
    parser.add_argument(
        '--student_type' , type=str , choices=['distilbert', 'roberta', 'gpt2'] , required=True , help='The student type (DistilBERT, RoBERTa).' , )
    parser.add_argument('--student_config' , type=str , required=True , help='Path to the student configuration.' )
    parser.add_argument(
        '--student_pretrained_weights' , default=None , type=str , help='Load student initialization checkpoint.' )
    parser.add_argument(
        '--teacher_type' , choices=['bert', 'roberta', 'gpt2'] , required=True , help='Teacher type (BERT, RoBERTa).' )
    parser.add_argument('--teacher_name' , type=str , required=True , help='The teacher model.' )
    parser.add_argument('--temperature' , default=2.0 , type=float , help='Temperature for the softmax temperature.' )
    parser.add_argument(
        '--alpha_ce' , default=0.5 , type=float , help='Linear weight for the distillation loss. Must be >=0.' )
    parser.add_argument(
        '--alpha_mlm' , default=0.0 , type=float , help='Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.' , )
    parser.add_argument('--alpha_clm' , default=0.5 , type=float , help='Linear weight for the CLM loss. Must be >=0.' )
    parser.add_argument('--alpha_mse' , default=0.0 , type=float , help='Linear weight of the MSE loss. Must be >=0.' )
    parser.add_argument(
        '--alpha_cos' , default=0.0 , type=float , help='Linear weight of the cosine embedding loss. Must be >=0.' )
    parser.add_argument(
        '--mlm' , action='store_true' , help='The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.' )
    parser.add_argument(
        '--mlm_mask_prop' , default=0.15 , type=float , help='Proportion of tokens for which we need to make a prediction.' , )
    parser.add_argument('--word_mask' , default=0.8 , type=float , help='Proportion of tokens to mask out.' )
    parser.add_argument('--word_keep' , default=0.1 , type=float , help='Proportion of tokens to keep.' )
    parser.add_argument('--word_rand' , default=0.1 , type=float , help='Proportion of tokens to randomly replace.' )
    parser.add_argument(
        '--mlm_smoothing' , default=0.7 , type=float , help='Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).' , )
    parser.add_argument('--token_counts' , type=str , help='The token counts in the data_file for MLM.' )
    parser.add_argument(
        '--restrict_ce_to_mask' , action='store_true' , help='If true, compute the distillation loss only on the [MLM] prediction distribution.' , )
    parser.add_argument(
        '--freeze_pos_embs' , action='store_true' , help='Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.' , )
    parser.add_argument(
        '--freeze_token_type_embds' , action='store_true' , help='Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.' , )
    parser.add_argument('--n_epoch' , type=int , default=3 , help='Number of passes on the whole dataset.' )
    parser.add_argument('--batch_size' , type=int , default=5 , help='Batch size (for each process).' )
    parser.add_argument(
        '--group_by_size' , action='store_false' , help='If true, group sequences that have similar length into the same batch. Default is true.' , )
    parser.add_argument(
        '--gradient_accumulation_steps' , type=int , default=50 , help='Gradient accumulation for larger training batches.' , )
    parser.add_argument('--warmup_prop' , default=0.05 , type=float , help='Linear warmup proportion.' )
    parser.add_argument('--weight_decay' , default=0.0 , type=float , help='Weight decay if we apply some.' )
    parser.add_argument('--learning_rate' , default=5E-4 , type=float , help='The initial learning rate for Adam.' )
    parser.add_argument('--adam_epsilon' , default=1E-6 , type=float , help='Epsilon for Adam optimizer.' )
    parser.add_argument('--max_grad_norm' , default=5.0 , type=float , help='Max gradient norm.' )
    parser.add_argument('--initializer_range' , default=0.02 , type=float , help='Random initialization range.' )
    parser.add_argument(
        '--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , )
    parser.add_argument(
        '--fp16_opt_level' , type=str , default='O1' , help=(
            'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
            ' See details at https://nvidia.github.io/apex/amp.html'
        ) , )
    parser.add_argument('--n_gpu' , type=int , default=1 , help='Number of GPUs in the node.' )
    parser.add_argument('--local_rank' , type=int , default=-1 , help='Distributed training - Local rank' )
    parser.add_argument('--seed' , type=int , default=56 , help='Random seed' )
    parser.add_argument('--log_interval' , type=int , default=5_00 , help='Tensorboard logging interval.' )
    parser.add_argument('--checkpoint_interval' , type=int , default=40_00 , help='Checkpoint interval.' )
    args = parser.parse_args()
    sanity_checks(args )
    # ARGS #
    init_gpu_params(args )
    set_seed(args )
    if args.is_master:
        if os.path.exists(args.dump_path ):
            if not args.force:
                raise ValueError(
                    F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to'''
                    ' overwrite it. Use `--force` if you want to overwrite it.' )
            else:
                shutil.rmtree(args.dump_path )
        if not os.path.exists(args.dump_path ):
            os.makedirs(args.dump_path )
        logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )
        # SAVE PARAMS #
        logger.info(F'''Param: {args}''' )
        with open(os.path.join(args.dump_path , 'parameters.json' ) , 'w' ) as f:
            json.dump(vars(args ) , f , indent=4 )
        git_log(args.dump_path )
    student_config_class , student_model_class , _ = MODEL_CLASSES[args.student_type]
    teacher_config_class , teacher_model_class , teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name )
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol )
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(F'''Special tokens {special_tok_ids}''' )
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
    # DATA LOADER #
    logger.info(F'''Loading data from {args.data_file}''' )
    with open(args.data_file , 'rb' ) as fp:
        data = pickle.load(fp )
    if args.mlm:
        logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' )
        with open(args.token_counts , 'rb' ) as fp:
            counts = pickle.load(fp )
        token_probs = np.maximum(counts , 1 ) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs )
    else:
        token_probs = None
    train_lm_seq_dataset = LmSeqsDataset(params=args , data=data )
    logger.info('Data loader created.' )
    # STUDENT #
    logger.info(F'''Loading student config from {args.student_config}''' )
    stu_architecture_config = student_config_class.from_pretrained(args.student_config )
    stu_architecture_config.output_hidden_states = True
    if args.student_pretrained_weights is not None:
        logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' )
        student = student_model_class.from_pretrained(args.student_pretrained_weights , config=stu_architecture_config )
    else:
        student = student_model_class(stu_architecture_config )
    if args.n_gpu > 0:
        student.to(F'''cuda:{args.local_rank}''' )
    logger.info('Student loaded.' )
    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=True )
    if args.n_gpu > 0:
        teacher.to(F'''cuda:{args.local_rank}''' )
    logger.info(F'''Teacher loaded from {args.teacher_name}.''' )
    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student , args )
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student , args )
    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0 ) == stu_architecture_config.vocab_size
    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args , dataset=train_lm_seq_dataset , token_probs=token_probs , student=student , teacher=teacher )
    distiller.train()
    logger.info('Let\'s go get some drinks.' )
if __name__ == "__main__":
    main()
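# Example invocation (sketch; the file names and paths below are assumptions,
# not artifacts shipped with this script):
#   python train.py --force --dump_path ./serialization_dir \
#       --data_file data/binarized_text.pickle --token_counts data/token_counts.pickle \
#       --student_type distilbert --student_config training_configs/distilbert-base-uncased.json \
#       --teacher_type bert --teacher_name bert-base-uncased \
#       --mlm --alpha_ce 0.33 --alpha_mlm 0.33 --alpha_clm 0.0 --alpha_cos 0.33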
| 315 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=False ) ->List[Any]:
"""simple docstring"""
a_ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
a_ = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) ->Optional[int]:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
a_ = ""
else:
a_ = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
a_ = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
a_ = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
a_ = in_proj_weight[
: config.hidden_size, :
]
a_ = in_proj_bias[: config.hidden_size]
a_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
a_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
a_ = in_proj_weight[
-config.hidden_size :, :
]
a_ = in_proj_bias[-config.hidden_size :]
def UpperCamelCase ( UpperCAmelCase ) ->Tuple:
"""simple docstring"""
a_ = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(_snake_case , _snake_case )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
a_ = dct.pop(_snake_case )
a_ = val
def UpperCamelCase ( ) ->int:
"""simple docstring"""
a_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
a_ = Image.open(requests.get(_snake_case , stream=_snake_case ).raw )
return im
@torch.no_grad()
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=True ) ->str:
"""simple docstring"""
a_ = ViTConfig()
# patch_size
if model_name[-1] == "8":
a_ = 8
# set labels if required
if not base_model:
a_ = 1_000
a_ = "huggingface/label-files"
a_ = "imagenet-1k-id2label.json"
a_ = json.load(open(hf_hub_download(_snake_case , _snake_case , repo_type="dataset" ) , "r" ) )
a_ = {int(_snake_case ): v for k, v in idalabel.items()}
a_ = idalabel
a_ = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
a_ = 384
a_ = 1_536
a_ = 12
a_ = 6
# load original model from torch hub
a_ = torch.hub.load("facebookresearch/dino:main" , _snake_case )
original_model.eval()
# load state_dict of original model, remove and rename some keys
a_ = original_model.state_dict()
if base_model:
remove_classification_head_(_snake_case )
a_ = create_rename_keys(_snake_case , base_model=_snake_case )
for src, dest in rename_keys:
rename_key(_snake_case , _snake_case , _snake_case )
read_in_q_k_v(_snake_case , _snake_case , _snake_case )
# load HuggingFace model
if base_model:
a_ = ViTModel(_snake_case , add_pooling_layer=_snake_case ).eval()
else:
a_ = ViTForImageClassification(_snake_case ).eval()
model.load_state_dict(_snake_case )
# Check outputs on an image, prepared by ViTImageProcessor
a_ = ViTImageProcessor()
a_ = image_processor(images=prepare_img() , return_tensors="pt" )
a_ = encoding["pixel_values"]
a_ = model(_snake_case )
if base_model:
a_ = original_model(_snake_case )
assert torch.allclose(_snake_case , outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
else:
a_ = original_model(_snake_case )
assert logits.shape == outputs.logits.shape
assert torch.allclose(_snake_case , outputs.logits , atol=1E-3 )
Path(_snake_case ).mkdir(exist_ok=_snake_case )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_snake_case )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_snake_case )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
UpperCamelCase_ = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
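# Example invocation (sketch; assumes this file is saved as
# convert_dino_to_pytorch.py and that the dump folder can be created):
#   python convert_dino_to_pytorch.py --model_name dino_vitb16 \
#       --pytorch_dump_folder_path ./dino_vitb16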
| 243 |
"""simple docstring"""
from manim import *
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : Dict ):
_A = Rectangle(height=0.5 , width=0.5 )
_A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_A = Rectangle(height=0.25 , width=0.25 )
_A = [mem.copy() for i in range(6 )]
_A = [mem.copy() for i in range(6 )]
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = Text('CPU' , font_size=24 )
_A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_UpperCAmelCase )
_A = [mem.copy() for i in range(4 )]
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = Text('GPU' , font_size=24 )
_A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(_UpperCAmelCase )
_A = [mem.copy() for i in range(6 )]
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = Text('Model' , font_size=24 )
_A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(_UpperCAmelCase )
_A = []
_A = []
for i, rect in enumerate(_UpperCAmelCase ):
_A = fill.copy().set_fill(_UpperCAmelCase , opacity=0.8 )
target.move_to(_UpperCAmelCase )
model_arr.append(_UpperCAmelCase )
_A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_UpperCAmelCase , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(_UpperCAmelCase )
self.add(*_UpperCAmelCase , *_UpperCAmelCase )
_A = [meta_mem.copy() for i in range(6 )]
_A = [meta_mem.copy() for i in range(6 )]
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = Text('Disk' , font_size=24 )
_A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
disk.move_to([-4, -1.25, 0] )
self.add(_UpperCAmelCase , _UpperCAmelCase )
_A = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_A = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_UpperCAmelCase , _UpperCAmelCase )
_A = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(_UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_UpperCAmelCase )
_A = MarkupText(
F'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase ) )
_A = Square(0.3 )
input.set_fill(_UpperCAmelCase , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , _UpperCAmelCase , buff=0.5 )
self.play(Write(_UpperCAmelCase ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=_UpperCAmelCase , buff=0.02 )
self.play(MoveToTarget(_UpperCAmelCase ) )
self.play(FadeOut(_UpperCAmelCase ) )
_A = Arrow(start=_UpperCAmelCase , end=_UpperCAmelCase , color=_UpperCAmelCase , buff=0.5 )
a.next_to(model_arr[0].get_left() , _UpperCAmelCase , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
_A = MarkupText(
F'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase , run_time=3 ) )
_A = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(_UpperCAmelCase ) , Circumscribe(model_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
_A = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , _UpperCAmelCase , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
_A = AnimationGroup(
FadeOut(_UpperCAmelCase , run_time=0.5 ) , MoveToTarget(_UpperCAmelCase , run_time=0.5 ) , FadeIn(_UpperCAmelCase , run_time=0.5 ) , lag_ratio=0.2 )
self.play(_UpperCAmelCase )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
_A = 0.7
self.play(
Circumscribe(model_arr[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_arr[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
_A = a_c
_A = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(_UpperCAmelCase ) , FadeOut(_UpperCAmelCase , run_time=0.5 ) , )
_A = MarkupText(F'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase , run_time=3 ) , MoveToTarget(_UpperCAmelCase ) )
self.wait()
| 315 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=3 , _a=18 , _a=30 , _a=400 , _a=True , _a=None , _a=True , _a=None , _a=True , ) -> Optional[Any]:
_A : int = size if size is not None else {"""shortest_edge""": 20}
_A : int = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
_A : List[str] = parent
_A : Optional[Any] = batch_size
_A : Dict = num_channels
_A : Optional[int] = image_size
_A : Tuple = min_resolution
_A : List[str] = max_resolution
_A : int = do_resize
_A : Dict = size
_A : str = do_center_crop
_A : Dict = crop_size
_A : Any = do_flip_channel_order
def a__ ( self ) -> Tuple:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class lowercase ( __lowerCAmelCase,unittest.TestCase ):
_a = MobileViTImageProcessor if is_vision_available() else None
def a__ ( self ) -> List[Any]:
_A : List[str] = MobileViTImageProcessingTester(self )
@property
def a__ ( self ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ) -> Tuple:
_A : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , """do_resize""" ) )
self.assertTrue(hasattr(_UpperCAmelCase , """size""" ) )
self.assertTrue(hasattr(_UpperCAmelCase , """do_center_crop""" ) )
self.assertTrue(hasattr(_UpperCAmelCase , """center_crop""" ) )
self.assertTrue(hasattr(_UpperCAmelCase , """do_flip_channel_order""" ) )
def a__ ( self ) -> Tuple:
_A : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
_A : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def a__ ( self ) -> List[Any]:
pass
def a__ ( self ) -> int:
# Initialize image_processing
_A : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
_A : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_A : List[Any] = image_processing(_UpperCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def a__ ( self ) -> Union[str, Any]:
# Initialize image_processing
_A : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
_A : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_A : Union[str, Any] = image_processing(_UpperCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def a__ ( self ) -> Union[str, Any]:
# Initialize image_processing
_A : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
_A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_A : List[Any] = image_processing(_UpperCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 26 |
"""simple docstring"""
def bubble_sort(list_data: list , length: int = 0 ) -> list:
    '''Recursive bubble sort: bubble the largest element of the unsorted prefix to the end.'''
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            # swap in place; the previous version assigned both values to a
            # single temporary and lost one element
            list_data[i] , list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
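    # Extra worked cases (illustrative, not in the original file): the
    # recursion stops as soon as a full pass makes no swap.
    assert bubble_sort([5, 1, 4, 2, 8] ) == [1, 2, 4, 5, 8]
    assert bubble_sort([] ) == []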
| 315 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
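# Behaviour sketch (illustrative): registering the `_LazyModule` above keeps
# importing this package cheap; `tokenization_herbert` (and the fast variant,
# when `tokenizers` is installed) is only loaded on the first attribute
# access such as `HerbertTokenizerFast`.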
| 170 |
"""simple docstring"""
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
a = logging.get_logger(__name__)
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : Any = ['''input_values''', '''attention_mask''']
def __init__( self : Dict , _UpperCAmelCase : int = 1 , _UpperCAmelCase : int = 16_000 , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : bool = False , _UpperCAmelCase : int = 80 , _UpperCAmelCase : int = 16 , _UpperCAmelCase : int = 64 , _UpperCAmelCase : str = "hann_window" , _UpperCAmelCase : float = 1.0 , _UpperCAmelCase : float = 80 , _UpperCAmelCase : float = 7_600 , _UpperCAmelCase : float = 1E-1_0 , _UpperCAmelCase : int = 2 , _UpperCAmelCase : bool = True , **_UpperCAmelCase : List[Any] , ):
super().__init__(feature_size=_UpperCAmelCase , sampling_rate=_UpperCAmelCase , padding_value=_UpperCAmelCase , **_UpperCAmelCase )
_A = do_normalize
_A = return_attention_mask
_A = num_mel_bins
_A = hop_length
_A = win_length
_A = win_function
_A = frame_signal_scale
_A = fmin
_A = fmax
_A = mel_floor
_A = reduction_factor
_A = win_length * sampling_rate // 1_000
_A = hop_length * sampling_rate // 1_000
_A = optimal_fft_length(self.sample_size )
_A = (self.n_fft // 2) + 1
_A = window_function(window_length=self.sample_size , name=self.win_function , periodic=_UpperCAmelCase )
_A = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='slaney' , mel_scale='slaney' , )
if frame_signal_scale != 1.0:
warnings.warn(
'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers' , _UpperCAmelCase , )
if reduction_factor != 2.0:
warnings.warn(
'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers' , _UpperCAmelCase , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def lowerCAmelCase_ ( _UpperCAmelCase : List[np.ndarray] , _UpperCAmelCase : List[np.ndarray] , _UpperCAmelCase : float = 0.0 ):
if attention_mask is not None:
_A = np.array(_UpperCAmelCase , np.intaa )
_A = []
for vector, length in zip(_UpperCAmelCase , attention_mask.sum(-1 ) ):
_A = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
_A = padding_value
normed_input_values.append(_UpperCAmelCase )
else:
_A = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : np.ndarray , ):
_A = spectrogram(
_UpperCAmelCase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='log10' , )
return log_mel_spec.T
def __call__( self : int , _UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : Optional[int] = None , **_UpperCAmelCase : Optional[int] , ):
if audio is None and audio_target is None:
raise ValueError('You must provide either `audio` or `audio_target` values.' )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
if audio is not None:
_A = self._process_audio(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , )
else:
_A = None
if audio_target is not None:
_A = self._process_audio(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , )
if inputs is None:
return inputs_target
else:
_A = inputs_target['input_values']
_A = inputs_target.get('attention_mask' )
if decoder_attention_mask is not None:
_A = decoder_attention_mask
return inputs
def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _UpperCAmelCase : bool = False , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , **_UpperCAmelCase : List[Any] , ):
_A = isinstance(_UpperCAmelCase , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
_A = is_batched_numpy or (
isinstance(_UpperCAmelCase , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_A = [np.asarray(_UpperCAmelCase , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(_UpperCAmelCase , np.ndarray ):
_A = np.asarray(_UpperCAmelCase , dtype=np.floataa )
elif isinstance(_UpperCAmelCase , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
_A = speech.astype(np.floataa )
# always return batch
if not is_batched:
_A = [speech]
# needed to make pad() work on spectrogram inputs
_A = self.feature_size
# convert into correct format for padding
if is_target:
_A = [self._extract_mel_features(_UpperCAmelCase ) for waveform in speech]
_A = BatchFeature({'input_values': features} )
_A = self.num_mel_bins
else:
_A = BatchFeature({'input_values': speech} )
_A = self.pad(
_UpperCAmelCase , padding=_UpperCAmelCase , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , **_UpperCAmelCase , )
_A = feature_size_hack
# convert input values to correct format
_A = padded_inputs['input_values']
if not isinstance(input_values[0] , np.ndarray ):
_A = [np.asarray(_UpperCAmelCase , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(_UpperCAmelCase , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
_A = [array.astype(np.floataa ) for array in input_values]
elif isinstance(_UpperCAmelCase , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
_A = input_values.astype(np.floataa )
# convert attention_mask to correct format
_A = padded_inputs.get('attention_mask' )
if attention_mask is not None:
_A = [np.asarray(_UpperCAmelCase , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
_A = (
attention_mask
if self._get_padding_strategies(_UpperCAmelCase , max_length=_UpperCAmelCase ) is not PaddingStrategy.DO_NOT_PAD
else None
)
_A = self.zero_mean_unit_var_norm(
padded_inputs['input_values'] , attention_mask=_UpperCAmelCase , padding_value=self.padding_value )
if return_tensors is not None:
_A = padded_inputs.convert_to_tensors(_UpperCAmelCase )
return padded_inputs
def lowerCAmelCase_ ( self : Any ):
_A = super().to_dict()
# Don't serialize these as they are derived from the other properties.
_A = ['window', 'mel_filters', 'sample_size', 'sample_stride', 'n_fft', 'n_freqs']
for name in names:
if name in output:
del output[name]
return output
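# Normalisation sketch (illustrative): for a padded waveform x whose real
# length is L (taken from the attention mask), `zero_mean_unit_var_norm`
# above computes x' = (x - mean(x[:L])) / sqrt(var(x[:L]) + 1e-7) and then
# overwrites the padded tail with `padding_value`.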
| 315 | 0 |
'''simple docstring'''
from manim import *
class lowerCAmelCase ( __lowerCAmelCase ):
def snake_case ( self : Dict ):
"""simple docstring"""
__lowercase =Rectangle(height=0.5 , width=0.5 )
__lowercase =Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
__lowercase =Rectangle(height=0.2_5 , width=0.2_5 )
__lowercase =[mem.copy() for i in range(6 )]
__lowercase =[mem.copy() for i in range(6 )]
__lowercase =VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
__lowercase =VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
__lowercase =VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
__lowercase =Text('CPU' , font_size=24 )
__lowercase =Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_UpperCAmelCase )
__lowercase =[mem.copy() for i in range(4 )]
__lowercase =VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
__lowercase =Text('GPU' , font_size=24 )
__lowercase =Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(_UpperCAmelCase )
__lowercase =[mem.copy() for i in range(6 )]
__lowercase =VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
__lowercase =Text('Model' , font_size=24 )
__lowercase =Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(_UpperCAmelCase )
__lowercase =[]
__lowercase =[]
for i, rect in enumerate(_UpperCAmelCase ):
__lowercase =fill.copy().set_fill(_UpperCAmelCase , opacity=0.8 )
target.move_to(_UpperCAmelCase )
model_arr.append(_UpperCAmelCase )
__lowercase =Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(_UpperCAmelCase , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(_UpperCAmelCase )
self.add(*_UpperCAmelCase , *_UpperCAmelCase )
__lowercase =[meta_mem.copy() for i in range(6 )]
__lowercase =[meta_mem.copy() for i in range(6 )]
__lowercase =VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
__lowercase =VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
__lowercase =VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
__lowercase =Text('Disk' , font_size=24 )
__lowercase =Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
disk.move_to([-4, -1.2_5, 0] )
self.add(_UpperCAmelCase , _UpperCAmelCase )
__lowercase =Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__lowercase =MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_UpperCAmelCase , _UpperCAmelCase )
__lowercase =MarkupText(
f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(_UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_UpperCAmelCase )
__lowercase =MarkupText(
f'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase ) )
__lowercase =Square(0.3 )
input.set_fill(_UpperCAmelCase , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , _UpperCAmelCase , buff=0.5 )
self.play(Write(_UpperCAmelCase ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=_UpperCAmelCase , buff=0.0_2 )
self.play(MoveToTarget(_UpperCAmelCase ) )
self.play(FadeOut(_UpperCAmelCase ) )
__lowercase =Arrow(start=_UpperCAmelCase , end=_UpperCAmelCase , color=_UpperCAmelCase , buff=0.5 )
a.next_to(model_arr[0].get_left() , _UpperCAmelCase , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
__lowercase =MarkupText(
f'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase , run_time=3 ) )
__lowercase ={'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.0_2}
self.play(
Write(_UpperCAmelCase ) , Circumscribe(model_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
__lowercase =a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.0_2 , _UpperCAmelCase , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.0_2 )
__lowercase =AnimationGroup(
FadeOut(_UpperCAmelCase , run_time=0.5 ) , MoveToTarget(_UpperCAmelCase , run_time=0.5 ) , FadeIn(_UpperCAmelCase , run_time=0.5 ) , lag_ratio=0.2 )
self.play(_UpperCAmelCase )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
__lowercase =0.7
self.play(
Circumscribe(model_arr[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_arr[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.0_2 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
__lowercase =a_c
__lowercase =a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.0_2 , buff=0.5 )
self.play(
FadeOut(_UpperCAmelCase ) , FadeOut(_UpperCAmelCase , run_time=0.5 ) , )
__lowercase =MarkupText(f'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase , run_time=3 ) , MoveToTarget(_UpperCAmelCase ) )
self.wait()
| 141 |
"""simple docstring"""
from __future__ import annotations
def generate_all_combinations(n: int , k: int ) -> list[list[int]]:
    '''Return every k-element combination of 1..n.'''
    result: list[list[int]] = []
    create_all_state(1 , n , k , [] , result )
    return result
def create_all_state(increment: int , total_number: int , level: int , current_list: list[int] , total_list: list[list[int]] , ) -> None:
    '''Depth-first helper: extend current_list until level reaches 0, then record it.'''
    if level == 0:
        total_list.append(current_list[:] )
        return
    for i in range(increment , total_number - level + 2 ):
        current_list.append(i )
        create_all_state(i + 1 , total_number , level - 1 , current_list , total_list )
        current_list.pop()
def print_all_state(total_list: list[list[int]] ) -> None:
    '''Print one combination per line.'''
    for i in total_list:
        print(*i )
if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
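    # Sanity sketch (illustrative, not in the original file): the recursion
    # emits exactly C(n, k) combinations, in lexicographic order.
    from math import comb
    assert len(total_list ) == comb(n , k )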
| 315 | 0 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007
def euclidean_distance(vector_1: Vector , vector_2: Vector ) -> VectorOut:
    '''Root of the summed squared differences, computed with NumPy.'''
    return np.sqrt(np.sum((np.asarray(vector_1 ) - np.asarray(vector_2 )) ** 2 ) )
def euclidean_distance_no_np(vector_1: Vector , vector_2: Vector ) -> VectorOut:
    '''Pure-Python variant of the same distance.'''
    return sum((va - vb) ** 2 for va, vb in zip(vector_1 , vector_2 ) ) ** (1 / 2)
if __name__ == "__main__":
def snake_case( ) -> None:
'''simple docstring'''
from timeit import timeit
print('''Without Numpy''' )
print(
timeit(
'''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=1_00_00 , globals=globals() , ) )
print('''With Numpy''' )
print(
timeit(
'''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=1_00_00 , globals=globals() , ) )
benchmark()
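    # On inputs this small the pure-Python version typically wins: numpy's
    # per-call array-creation overhead dominates until the vectors grow larger.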
| 308 |
"""simple docstring"""
def solution(n: int = 1000) -> int:
    """For each a in 3..n, the largest value of ((a - 1)**k + (a + 1)**k) % a**2
    over all k is 2 * a * ((a - 1) // 2); return the sum of these maxima
    (Project Euler problem 120)."""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
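    # Sanity check: for n = 3 the only term is a = 3, giving 2 * 3 * 1 == 6.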
| 315 | 0 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Return string1 with the differing position replaced by '_' if the two
    terms differ in at most one position, else False."""
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """Repeatedly merge terms that differ in one bit; terms that never merge
    are the prime implicants."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is not False:
                    # The two terms combine: mark both as merged and keep the
                    # combined term for the next round.
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    """Render each minterm as a fixed-width binary string."""
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """True if the two strings differ in exactly `count` positions."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Pick essential prime implicants first, then greedily cover the rest."""
    temp = []
    select = [0] * len(chart)
    # A minterm column covered by exactly one implicant makes it essential.
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Greedy set cover over the remaining columns.
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the coverage chart: chart[i][j] == 1 iff implicant i covers minterm j."""
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
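# Example (non-interactive) use of the pipeline above, e.g. for 3 variables
# with minterms {0, 1, 2, 5, 6, 7}:
#   binary = decimal_to_binary(3, [0.0, 1.0, 2.0, 5.0, 6.0, 7.0])
#   prime_implicants = check(binary)
#   chart = prime_implicant_chart(prime_implicants, binary)
#   essential = selection(chart, prime_implicants)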
| 252 |
"""simple docstring"""
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
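# Flax/JAX ports of the 2D UNet building blocks used by Stable Diffusion:
# each down/up block stacks ResNet sub-blocks (optionally interleaved with
# cross-attention Transformer blocks) and ends with an optional resampler.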
class lowercase_ ( nn.Module ):
'''simple docstring'''
UpperCAmelCase : int
UpperCAmelCase : int
UpperCAmelCase : float = 0.0
UpperCAmelCase : int = 1
UpperCAmelCase : int = 1
UpperCAmelCase : bool = True
UpperCAmelCase : bool = False
UpperCAmelCase : bool = False
UpperCAmelCase : bool = False
UpperCAmelCase : jnp.dtype = jnp.floataa
def lowerCAmelCase_ ( self : List[str] ):
_A = []
_A = []
for i in range(self.num_layers ):
_A = self.in_channels if i == 0 else self.out_channels
_A = FlaxResnetBlockaD(
in_channels=_UpperCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_UpperCAmelCase )
_A = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(_UpperCAmelCase )
_A = resnets
_A = attentions
if self.add_downsample:
_A = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple=True ):
_A = ()
for resnet, attn in zip(self.resnets , self.attentions ):
_A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase )
_A = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase )
output_states += (hidden_states,)
if self.add_downsample:
_A = self.downsamplers_a(_UpperCAmelCase )
output_states += (hidden_states,)
return hidden_states, output_states
class lowercase_ ( nn.Module ):
'''simple docstring'''
UpperCAmelCase : int
UpperCAmelCase : int
UpperCAmelCase : float = 0.0
UpperCAmelCase : int = 1
UpperCAmelCase : bool = True
UpperCAmelCase : jnp.dtype = jnp.floataa
def lowerCAmelCase_ ( self : List[Any] ):
_A = []
for i in range(self.num_layers ):
_A = self.in_channels if i == 0 else self.out_channels
_A = FlaxResnetBlockaD(
in_channels=_UpperCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_UpperCAmelCase )
_A = resnets
if self.add_downsample:
_A = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : List[str] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : List[str]=True ):
_A = ()
for resnet in self.resnets:
_A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase )
output_states += (hidden_states,)
if self.add_downsample:
_A = self.downsamplers_a(_UpperCAmelCase )
output_states += (hidden_states,)
return hidden_states, output_states
class lowercase_ ( nn.Module ):
'''simple docstring'''
UpperCAmelCase : int
UpperCAmelCase : int
UpperCAmelCase : int
UpperCAmelCase : float = 0.0
UpperCAmelCase : int = 1
UpperCAmelCase : int = 1
UpperCAmelCase : bool = True
UpperCAmelCase : bool = False
UpperCAmelCase : bool = False
UpperCAmelCase : bool = False
UpperCAmelCase : jnp.dtype = jnp.floataa
def lowerCAmelCase_ ( self : Any ):
_A = []
_A = []
for i in range(self.num_layers ):
_A = self.in_channels if (i == self.num_layers - 1) else self.out_channels
_A = self.prev_output_channel if i == 0 else self.out_channels
_A = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_UpperCAmelCase )
_A = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(_UpperCAmelCase )
_A = resnets
_A = attentions
if self.add_upsample:
_A = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any]=True ):
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
_A = res_hidden_states_tuple[-1]
_A = res_hidden_states_tuple[:-1]
_A = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
_A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase )
_A = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase )
if self.add_upsample:
_A = self.upsamplers_a(_UpperCAmelCase )
return hidden_states
class lowercase_ ( nn.Module ):
'''simple docstring'''
UpperCAmelCase : int
UpperCAmelCase : int
UpperCAmelCase : int
UpperCAmelCase : float = 0.0
UpperCAmelCase : int = 1
UpperCAmelCase : bool = True
UpperCAmelCase : jnp.dtype = jnp.floataa
def lowerCAmelCase_ ( self : Any ):
_A = []
for i in range(self.num_layers ):
_A = self.in_channels if (i == self.num_layers - 1) else self.out_channels
_A = self.prev_output_channel if i == 0 else self.out_channels
_A = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_UpperCAmelCase )
_A = resnets
if self.add_upsample:
_A = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : int , _UpperCAmelCase : int , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[int]=True ):
for resnet in self.resnets:
# pop res hidden states
_A = res_hidden_states_tuple[-1]
_A = res_hidden_states_tuple[:-1]
_A = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
_A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase )
if self.add_upsample:
_A = self.upsamplers_a(_UpperCAmelCase )
return hidden_states
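# The mid block sits at the UNet bottleneck: one leading ResNet followed by
# alternating (attention, ResNet) pairs, with no change in resolution.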
class lowercase_ ( nn.Module ):
'''simple docstring'''
UpperCAmelCase : int
UpperCAmelCase : float = 0.0
UpperCAmelCase : int = 1
UpperCAmelCase : int = 1
UpperCAmelCase : bool = False
UpperCAmelCase : bool = False
UpperCAmelCase : jnp.dtype = jnp.floataa
def lowerCAmelCase_ ( self : Dict ):
# there is always at least one resnet
_A = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
_A = []
for _ in range(self.num_layers ):
_A = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(_UpperCAmelCase )
_A = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_UpperCAmelCase )
_A = resnets
_A = attentions
def __call__( self : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int]=True ):
_A = self.resnets[0](_UpperCAmelCase , _UpperCAmelCase )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
_A = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase )
_A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase )
return hidden_states
| 315 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
"uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
"uclanlp/visualbert-vqa-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
"uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
"uclanlp/visualbert-vcr-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    """Configuration for VisualBERT models; mirrors BertConfig plus the
    dimensionality of the visual embeddings."""

    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
| 10 |
"""simple docstring"""
import numpy
class lowercase_ :
'''simple docstring'''
def __init__( self : Dict , _UpperCAmelCase : numpy.ndarray , _UpperCAmelCase : numpy.ndarray ):
_A = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
_A = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
_A = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
_A = numpy.random.rand(3 , 1 )
# Real output values provided.
_A = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
_A = numpy.zeros(output_array.shape )
def lowerCAmelCase_ ( self : List[str] ):
_A = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
_A = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
_A = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def lowerCAmelCase_ ( self : Optional[int] ):
_A = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
_A = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
_A = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : numpy.ndarray , _UpperCAmelCase : int , _UpperCAmelCase : bool ):
for iteration in range(1 , iterations + 1 ):
_A = self.feedforward()
self.back_propagation()
if give_loss:
_A = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F'''Iteration {iteration} Loss: {loss}''' )
def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : numpy.ndarray ):
_A = input_arr
_A = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
_A = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
_A = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def _snake_case ( _snake_case : numpy.ndarray ) -> numpy.ndarray:
'''simple docstring'''
return 1 / (1 + numpy.exp(-value ))
def _snake_case ( _snake_case : numpy.ndarray ) -> numpy.ndarray:
'''simple docstring'''
return (value) * (1 - (value))
def _snake_case ( ) -> int:
'''simple docstring'''
_A = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
_A = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
_A = TwoHiddenLayerNeuralNetwork(
input_array=_snake_case , output_array=_snake_case )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=_snake_case , iterations=10 , give_loss=_snake_case )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
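    # The truth table above is 3-input XOR (odd parity): a single-layer
    # perceptron cannot learn it, but a network with hidden layers can.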
| 315 | 0 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"text-classification",
"language-modeling",
"summarization",
"token-classification",
"question-answering",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
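# Each test below drives the corresponding example script end-to-end on tiny
# fixture datasets and asserts a loose metric bound, so regressions surface quickly.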
class ExamplesTests(TestCasePlus):
def UpperCAmelCase__ ( self : Dict ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ : List[Any] = self.get_auto_remove_tmp_dir()
UpperCamelCase__ : Optional[Any] = F"\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split()
with patch.object(_UpperCAmelCase , '''argv''' , _UpperCAmelCase ):
run_flax_glue.main()
UpperCamelCase__ : List[str] = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
def UpperCAmelCase__ ( self : Tuple ) -> int:
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = self.get_auto_remove_tmp_dir()
UpperCamelCase__ : Optional[int] = F"\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
with patch.object(_UpperCAmelCase , '''argv''' , _UpperCAmelCase ):
run_clm_flax.main()
UpperCamelCase__ : List[Any] = get_results(_UpperCAmelCase )
self.assertLess(result['''eval_perplexity'''] , 100 )
@slow
def UpperCAmelCase__ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ : str = self.get_auto_remove_tmp_dir()
UpperCamelCase__ : Dict = F"\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n ".split()
with patch.object(_UpperCAmelCase , '''argv''' , _UpperCAmelCase ):
run_summarization_flax.main()
UpperCamelCase__ : Optional[Any] = get_results(_UpperCAmelCase , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ : int = self.get_auto_remove_tmp_dir()
UpperCamelCase__ : Union[str, Any] = F"\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n ".split()
with patch.object(_UpperCAmelCase , '''argv''' , _UpperCAmelCase ):
run_mlm_flax.main()
UpperCamelCase__ : int = get_results(_UpperCAmelCase )
self.assertLess(result['''eval_perplexity'''] , 42 )
@slow
def UpperCAmelCase__ ( self : int ) -> str:
'''simple docstring'''
UpperCamelCase__ : Any = self.get_auto_remove_tmp_dir()
UpperCamelCase__ : str = F"\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
with patch.object(_UpperCAmelCase , '''argv''' , _UpperCAmelCase ):
run_ta_mlm_flax.main()
UpperCamelCase__ : Tuple = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
def UpperCAmelCase__ ( self : Tuple ) -> Any:
'''simple docstring'''
UpperCamelCase__ : str = 7 if get_gpu_count() > 1 else 2
UpperCamelCase__ : List[Any] = self.get_auto_remove_tmp_dir()
UpperCamelCase__ : Optional[int] = F"\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n ".split()
with patch.object(_UpperCAmelCase , '''argv''' , _UpperCAmelCase ):
run_flax_ner.main()
UpperCamelCase__ : Optional[Any] = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
UpperCamelCase__ : List[str] = self.get_auto_remove_tmp_dir()
UpperCamelCase__ : str = F"\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n ".split()
with patch.object(_UpperCAmelCase , '''argv''' , _UpperCAmelCase ):
run_qa.main()
UpperCamelCase__ : Union[str, Any] = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result['''eval_f1'''] , 30 )
self.assertGreaterEqual(result['''eval_exact'''] , 30 )
| 146 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
a = TypeVar('''T''')
class lowercase_ ( Generic[T] ):
'''simple docstring'''
def __init__( self : Any , _UpperCAmelCase : T ):
_A = data
_A = None
def __str__( self : str ):
return F'''{self.data}'''
class lowercase_ ( Generic[T] ):
'''simple docstring'''
def __init__( self : Tuple ):
_A = None
def __iter__( self : List[Any] ):
_A = self.top
while node:
yield node.data
_A = node.next
def __str__( self : Union[str, Any] ):
return "->".join([str(_UpperCAmelCase ) for item in self] )
def __len__( self : List[Any] ):
return len(tuple(iter(self ) ) )
def lowerCAmelCase_ ( self : str ):
return self.top is None
def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : T ):
_A = Node(_UpperCAmelCase )
if not self.is_empty():
_A = self.top
_A = node
def lowerCAmelCase_ ( self : Dict ):
if self.is_empty():
raise IndexError('pop from empty stack' )
assert isinstance(self.top , _UpperCAmelCase )
_A = self.top
_A = self.top.next
return pop_node.data
def lowerCAmelCase_ ( self : Tuple ):
if self.is_empty():
raise IndexError('peek from empty stack' )
assert self.top is not None
return self.top.data
def lowerCAmelCase_ ( self : Optional[Any] ):
_A = None
if __name__ == "__main__":
from doctest import testmod
testmod()
| 315 | 0 |
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
SAMPLE_ROBERTA_CONFIG = get_tests_dir("fixtures/dummy-config.json")
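# These tests exercise AutoConfig's main lookup paths: hub model ids, local
# files and folders, locally registered custom classes, and remote code
# gated behind trust_remote_code.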
class AutoConfigTest(unittest.TestCase):
    def setUp(self):
        # Fail fast instead of waiting on the trust_remote_code prompt.
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
def lowercase__ ( self ):
"""simple docstring"""
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto" ) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = AutoConfig.from_pretrained("bert-base-uncased" )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Any = AutoConfig.from_pretrained(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = AutoConfig.from_pretrained(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = AutoConfig.for_model("roberta" )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
lowerCAmelCase : List[str] = os.path.join(_UpperCAmelCase , "fake-roberta" )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , "config.json" ) , "w" ) as f:
f.write(json.dumps({} ) )
lowerCAmelCase : int = AutoConfig.from_pretrained(_UpperCAmelCase )
self.assertEqual(type(_UpperCAmelCase ) , _UpperCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
try:
AutoConfig.register("custom" , _UpperCAmelCase )
# Wrong model type will raise an error
with self.assertRaises(_UpperCAmelCase ):
AutoConfig.register("model" , _UpperCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_UpperCAmelCase ):
AutoConfig.register("bert" , _UpperCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCAmelCase : str = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_UpperCAmelCase )
lowerCAmelCase : Tuple = AutoConfig.from_pretrained(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def lowercase__ ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
_UpperCAmelCase , "bert-base is not a local folder and is not a valid model identifier" ):
lowerCAmelCase : Dict = AutoConfig.from_pretrained("bert-base" )
def lowercase__ ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
_UpperCAmelCase , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
lowerCAmelCase : Dict = AutoConfig.from_pretrained(_UpperCAmelCase , revision="aaaaaa" )
def lowercase__ ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
_UpperCAmelCase , "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json." , ):
lowerCAmelCase : List[str] = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo" )
def lowercase__ ( self ):
"""simple docstring"""
with self.assertRaises(_UpperCAmelCase ):
lowerCAmelCase : Optional[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_UpperCAmelCase ):
lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=_UpperCAmelCase )
lowerCAmelCase : List[str] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=_UpperCAmelCase )
self.assertEqual(config.__class__.__name__ , "NewModelConfig" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_UpperCAmelCase )
lowerCAmelCase : str = AutoConfig.from_pretrained(_UpperCAmelCase , trust_remote_code=_UpperCAmelCase )
self.assertEqual(reloaded_config.__class__.__name__ , "NewModelConfig" )
def lowercase__ ( self ):
"""simple docstring"""
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"
try:
AutoConfig.register("new-model" , _UpperCAmelCase )
# If remote code is not set, the default is to use local
lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" )
# If remote code is disabled, we load the local one.
lowerCAmelCase : Union[str, Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=_UpperCAmelCase )
self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" )
# If remote is enabled, we load from the Hub
lowerCAmelCase : List[str] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=_UpperCAmelCase )
self.assertEqual(config.__class__.__name__ , "NewModelConfig" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 108 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
a = logging.get_logger(__name__)
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Any , *_UpperCAmelCase : List[str] , **_UpperCAmelCase : Union[str, Any] ):
warnings.warn(
'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use ImageGPTImageProcessor instead.' , _UpperCAmelCase , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
| 315 | 0 |
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
('''/''', '''.'''),
('''layer_''', '''layers.'''),
('''kernel''', '''weight'''),
('''beta''', '''bias'''),
('''gamma''', '''weight'''),
('''pegasus''', '''model'''),
]
END_COMMON = [
('''.output.dense''', '''.fc2'''),
('''intermediate.LayerNorm''', '''final_layer_norm'''),
('''intermediate.dense''', '''fc1'''),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.out_proj'''),
('''attention.self''', '''self_attn'''),
('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''),
('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''),
('''attention.encdec''', '''encoder_attn'''),
('''key''', '''k_proj'''),
('''value''', '''v_proj'''),
('''query''', '''q_proj'''),
('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
('''embeddings.word_embeddings''', '''shared.weight'''),
('''embeddings.position_embeddings''', '''embed_positions.weight'''),
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.output'''),
('''attention.self''', '''self_attn.self'''),
('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
'''encdec/key/bias''',
'''encdec/query/bias''',
'''encdec/value/bias''',
'''self/key/bias''',
'''self/query/bias''',
'''self/value/bias''',
'''encdec_output/dense/bias''',
'''attention/output/dense/bias''',
]
def rename_state_dict_key(k: str, patterns) -> str:
    """Apply each (tf_name, hf_name) substitution in order to a TF key."""
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
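# e.g. rename_state_dict_key("pegasus/decoder/layer_0/attention/self/query/kernel",
#                            DECODER_PATTERNS) -> "model.decoder.layers.0.self_attn.q_proj.weight"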
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    """Map a dict of TF arrays onto a freshly initialised PyTorch model."""
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    # The shared position table feeds both the encoder and decoder slots.
    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")

    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path) -> dict:
    """Load every variable from a TF checkpoint into a {name: array} dict."""
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict) -> None:
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    config_update = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 130 |
"""simple docstring"""
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def _snake_case ( _snake_case : str ) -> str:
'''simple docstring'''
return "".join(sorted(_snake_case ) )
def _snake_case ( _snake_case : str ) -> list[str]:
'''simple docstring'''
return word_by_signature[signature(_snake_case )]
a = Path(__file__).parent.joinpath('''words.txt''').read_text(encoding='''utf-8''')
a = sorted({word.strip().lower() for word in data.splitlines()})
a = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
a = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('''anagrams.txt''', '''w''') as file:
file.write('''all_anagrams = \n ''')
file.write(pprint.pformat(all_anagrams))
| 315 | 0 |
import copy
import re
class TrialShortNamer:
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None
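    # Builds compact, collision-free run names for hyperparameter trials: each
    # parameter name is shortened to a unique prefix, and values that differ
    # from DEFAULTS are appended to PREFIX as "<short><sep><value>" segments.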
@classmethod
def __UpperCamelCase( cls , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : str = prefix
UpperCamelCase : List[str] = defaults
cls.build_naming_info()
@staticmethod
def __UpperCamelCase( A_ , A_ ):
'''simple docstring'''
if len(_UpperCAmelCase ) == 0:
return ""
UpperCamelCase : List[str] = None
if any(char.isdigit() for char in word ):
raise Exception(F"""Parameters should not contain numbers: \'{word}\' contains a number""" )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(_UpperCAmelCase ) + 1 ):
UpperCamelCase : Dict = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
UpperCamelCase : List[Any] = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(A_ ):
UpperCamelCase : int = ""
while integer != 0:
UpperCamelCase : Union[str, Any] = chr(ord("A" ) + integer % 10 ) + s
integer //= 10
return s
UpperCamelCase : List[Any] = 0
while True:
UpperCamelCase : int = word + "#" + int_to_alphabetic(_UpperCAmelCase )
if sword in info["reverse_short_word"]:
continue
else:
UpperCamelCase : List[Any] = sword
break
UpperCamelCase : Dict = short_word
UpperCamelCase : Optional[int] = word
return short_word
@staticmethod
def __UpperCamelCase( A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Dict = param_name.split("_" )
UpperCamelCase : int = [TrialShortNamer.shortname_for_word(_UpperCAmelCase , _UpperCAmelCase ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
UpperCamelCase : int = ["", "_"]
for separator in separators:
UpperCamelCase : str = separator.join(_UpperCAmelCase )
if shortname not in info["reverse_short_param"]:
UpperCamelCase : List[Any] = shortname
UpperCamelCase : str = param_name
return shortname
return param_name
@staticmethod
def __UpperCamelCase( A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = TrialShortNamer.shortname_for_key(_UpperCAmelCase , _UpperCAmelCase )
UpperCamelCase : str = short_name
UpperCamelCase : int = param_name
@classmethod
def __UpperCamelCase( cls ):
'''simple docstring'''
if cls.NAMING_INFO is not None:
return
UpperCamelCase : Dict = {
"short_word": {},
"reverse_short_word": {},
"short_param": {},
"reverse_short_param": {},
}
UpperCamelCase : Tuple = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(_UpperCAmelCase , _UpperCAmelCase )
UpperCamelCase : int = info
@classmethod
def __UpperCamelCase( cls , A_ ):
'''simple docstring'''
cls.build_naming_info()
assert cls.PREFIX is not None
UpperCamelCase : Optional[int] = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(F"""You should provide a default value for the param name {k} with value {v}""" )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
UpperCamelCase : Any = cls.NAMING_INFO["short_param"][k]
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
UpperCamelCase : str = 1 if v else 0
UpperCamelCase : str = "" if isinstance(_UpperCAmelCase , (int, float) ) else "-"
UpperCamelCase : int = F"""{key}{sep}{v}"""
name.append(_UpperCAmelCase )
return "_".join(_UpperCAmelCase )
@classmethod
def __UpperCamelCase( cls , A_ ):
'''simple docstring'''
UpperCamelCase : int = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
UpperCamelCase : List[Any] = []
else:
UpperCamelCase : Dict = repr.split("_" )
UpperCamelCase : List[str] = {}
for value in values:
if "-" in value:
UpperCamelCase , UpperCamelCase : Union[str, Any] = value.split("-" )
else:
UpperCamelCase : Optional[int] = re.sub("[0-9.]" , "" , _UpperCAmelCase )
UpperCamelCase : Union[str, Any] = float(re.sub("[^0-9.]" , "" , _UpperCAmelCase ) )
UpperCamelCase : List[Any] = cls.NAMING_INFO["reverse_short_param"][p_k]
UpperCamelCase : Any = p_v
for k in cls.DEFAULTS:
if k not in parameters:
UpperCamelCase : Any = cls.DEFAULTS[k]
return parameters
| 52 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
@dataclass
class lowercase_ :
'''simple docstring'''
UpperCAmelCase : Optional[str] = field(
default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
UpperCAmelCase : Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
UpperCAmelCase : Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''The column name of the images in the files.'''} )
UpperCAmelCase : Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''A folder containing the training data.'''} )
UpperCAmelCase : Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''A folder containing the validation data.'''} )
UpperCAmelCase : Optional[float] = field(
default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} )
UpperCAmelCase : Optional[int] = field(
default=__lowerCAmelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
UpperCAmelCase : Optional[int] = field(
default=__lowerCAmelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
def lowerCAmelCase_ ( self : Dict ):
_A = {}
if self.train_dir is not None:
_A = self.train_dir
if self.validation_dir is not None:
_A = self.validation_dir
_A = data_files if data_files else None
@dataclass
class lowercase_ :
'''simple docstring'''
UpperCAmelCase : str = field(
default=__lowerCAmelCase , metadata={
'''help''': (
'''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'''
)
} , )
UpperCAmelCase : Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name_or_path'''} )
UpperCAmelCase : Optional[str] = field(
default=__lowerCAmelCase , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
UpperCAmelCase : Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} )
UpperCAmelCase : str = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
UpperCAmelCase : str = field(default=__lowerCAmelCase , metadata={'''help''': '''Name or path of preprocessor config.'''} )
UpperCAmelCase : bool = field(
default=__lowerCAmelCase , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
UpperCAmelCase : float = field(
default=0.75 , metadata={'''help''': '''The ratio of the number of masked tokens in the input sequence.'''} )
UpperCAmelCase : bool = field(
default=__lowerCAmelCase , metadata={'''help''': '''Whether or not to train with normalized pixel values as target.'''} )
@dataclass
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : float = field(
default=1E-3 , metadata={'''help''': '''Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'''} )
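# MAE pretraining is self-supervised, so the collator below only needs to
# stack pixel values; there are no labels to batch.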
def _snake_case ( _snake_case : int ) -> Optional[int]:
'''simple docstring'''
_A = torch.stack([example['pixel_values'] for example in examples] )
return {"pixel_values": pixel_values}
def _snake_case ( ) -> List[str]:
'''simple docstring'''
_A = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_A , _A , _A = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_A , _A , _A = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_mae' , _snake_case , _snake_case )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_A = training_args.get_process_log_level()
logger.setLevel(_snake_case )
transformers.utils.logging.set_verbosity(_snake_case )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
_A = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_A = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset.
_A = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
_A = None if 'validation' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , _snake_case ) and data_args.train_val_split > 0.0:
_A = ds['train'].train_test_split(data_args.train_val_split )
_A = split['train']
_A = split['test']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_A = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
_A = ViTMAEConfig.from_pretrained(model_args.config_name , **_snake_case )
elif model_args.model_name_or_path:
_A = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **_snake_case )
else:
_A = ViTMAEConfig()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(F'''New config: {config}''' )
# adapt config
config.update(
{
'mask_ratio': model_args.mask_ratio,
'norm_pix_loss': model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
_A = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **_snake_case )
elif model_args.model_name_or_path:
_A = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **_snake_case )
else:
_A = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
_A = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
_A = ViTMAEForPreTraining(_snake_case )
if training_args.do_train:
_A = ds['train'].column_names
else:
_A = ds['validation'].column_names
if data_args.image_column_name is not None:
_A = data_args.image_column_name
elif "image" in column_names:
_A = 'image'
elif "img" in column_names:
_A = 'img'
else:
_A = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
_A = image_processor.size['shortest_edge']
else:
_A = (image_processor.size['height'], image_processor.size['width'])
_A = Compose(
[
Lambda(lambda _snake_case : img.convert('RGB' ) if img.mode != "RGB" else img ),
RandomResizedCrop(_snake_case , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(_snake_case : List[Any] ):
_A = [transforms(_snake_case ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
_A = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(_snake_case )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
_A = (
ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(_snake_case )
# Compute absolute learning rate
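    # (linear lr scaling rule from the MAE paper: absolute_lr = base_lr * total_batch_size / 256)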
_A = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
_A = training_args.base_learning_rate * total_train_batch_size / 2_56
# Initialize our trainer
_A = Trainer(
model=_snake_case , args=_snake_case , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=_snake_case , data_collator=_snake_case , )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()
        trainer.log_metrics('train' , train_result.metrics )
        trainer.save_metrics('train' , train_result.metrics )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('eval' , metrics )
        trainer.save_metrics('eval' , metrics )
    # Write model card and (optionally) push to hub
    kwargs = {
        'tasks': 'masked-auto-encoding',
        'dataset': data_args.dataset_name,
        'tags': ['masked-auto-encoding'],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )


def _mp_fn(index ) -> Optional[Any]:
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 315 | 0 |
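# The script above scales the learning rate linearly with the total batch
# size (the MAE recipe). A minimal sketch of that rule; the function and
# argument names here are illustrative, not part of the script itself.
def absolute_learning_rate(base_lr: float, per_device_batch: int,
                           grad_accum_steps: int, world_size: int) -> float:
    # lr = base_lr * total_batch_size / 256, as in the original MAE paper
    total_batch_size = per_device_batch * grad_accum_steps * world_size
    return base_lr * total_batch_size / 256


print(absolute_learning_rate(1.5e-4, 64, 2, 4))  # 0.0003 at total batch 512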
"""simple docstring"""
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    def __init__(self , id_ ) -> None:
        self.id = str(id_ )
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self , other ) -> bool:
        return self.key < other.key

    def __repr__(self ) -> str:
        return self.id

    def add_neighbor(self , vertex ) -> None:
        self.neighbors.append(vertex )

    def add_edge(self , vertex , weight ) -> None:
        self.edges[vertex.id] = weight
def connect(graph , a , b , edge ) -> None:
    """simple docstring"""
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1] )
    graph[b - 1].add_neighbor(graph[a - 1] )
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1] , edge )
    graph[b - 1].add_edge(graph[a - 1] , edge )
def prim(graph , root ) -> list:
    """simple docstring"""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q )
        q.remove(u )
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1 , len(graph ) ):
        a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
    return a
def prim_heap(graph , root ) -> Iterator[tuple]:
    """simple docstring"""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph )
    hq.heapify(h )
    while h:
        u = hq.heappop(h )
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h )
    for i in range(1 , len(graph ) ):
        yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)


def test_vector() -> None:
    """simple docstring"""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 243 |
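# Usage sketch for the Vertex/connect/prim definitions above (assumes they
# are in scope): build a five-vertex example and print the MST as
# (vertex, parent) pairs, both reported 1-indexed.
graph = [Vertex(i) for i in range(5)]  # ids "0".."4"
connect(graph, 1, 2, 3)
connect(graph, 1, 3, 4)
connect(graph, 2, 3, 5)
connect(graph, 2, 4, 6)
connect(graph, 3, 5, 2)
connect(graph, 4, 5, 1)
print(prim(graph, graph[0]))  # [(2, 1), (3, 1), (4, 5), (5, 3)]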
"""simple docstring"""
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)
name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def add_arguments(parser : int ) -> List[Any]:
    '''simple docstring'''
    group = parser.add_argument_group('quant_trainer arguments' )
    group.add_argument('--wprec' , type=int , default=8 , help='weight precision' )
    group.add_argument('--aprec' , type=int , default=8 , help='activation precision' )
    group.add_argument('--quant-per-tensor' , action='store_true' , help='per tensor weight scaling' )
    group.add_argument('--quant-disable' , action='store_true' , help='disable all quantizers' )
    group.add_argument('--quant-disable-embeddings' , action='store_true' , help='disable all embeddings quantizers' )
    group.add_argument('--quant-disable-keyword' , type=str , nargs='+' , help='disable quantizers by keyword' )
    group.add_argument('--quant-disable-layer-module' , type=str , help='disable quantizers by keyword under layer.' )
    group.add_argument('--quant-enable-layer-module' , type=str , help='enable quantizers by keyword under layer' )
    group.add_argument('--calibrator' , default='max' , help='which quantization range calibrator to use' )
    group.add_argument('--percentile' , default=None , type=float , help='percentile for PercentileCalibrator' )
    group.add_argument('--fuse-qkv' , action='store_true' , help='use the same scale factor for qkv' )
    group.add_argument('--clip-gelu' , metavar='N' , type=float , help='clip gelu output maximum value to N' )
group.add_argument(
'--recalibrate-weights' , action='store_true' , help=(
'recalibrate weight amaxes by taking the max of the weights.'
' amaxes will be computed with the current quantization granularity (axis).'
) , )
def set_default_quantizers(args : Dict ) -> Optional[Any]:
    '''simple docstring'''
    if args.calibrator == "max":
        calib_method = 'max'
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError('Specify --percentile when using percentile calibrator' )
        calib_method = 'histogram'
    elif args.calibrator == "mse":
        calib_method = 'histogram'
    else:
        raise ValueError(F'''Invalid calibrator {args.calibrator}''' )
    input_desc = QuantDescriptor(num_bits=args.aprec , calib_method=calib_method )
    weight_desc = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc )
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc )
def configure_model(model : Union[str, Any] , args : List[Any] , calib : Any=False , eval : Union[str, Any]=False ) -> Optional[int]:
    '''simple docstring'''
    logger.info('Configuring Model for Quantization' )
    logger.info(F'''using quantization package {pytorch_quantization.__file__}''' )
    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model , ['embeddings'] , which='weight' , _disabled=True )
        if args.quant_disable:
            set_quantizer_by_name(model , [''] , _disabled=True )
        if args.quant_disable_keyword:
            set_quantizer_by_name(model , args.quant_disable_keyword , _disabled=True )
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model , [R'layer.\d+.' + args.quant_disable_layer_module] , _disabled=True )
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model , [R'layer.\d+.' + args.quant_enable_layer_module] , _disabled=False )
        if args.recalibrate_weights:
            recalibrate_weights(model )
        if args.fuse_qkv:
            fuse_qkv(model , args )
        if args.clip_gelu:
            clip_gelu(model , args.clip_gelu )
        # if args.local_rank in [-1, 0] and not calib:
        print_quant_summary(model )
def enable_calibration(model : str ) -> Any:
'''simple docstring'''
logger.info('Enabling Calibration' )
for name, module in model.named_modules():
if name.endswith('_quantizer' ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F'''{name:80}: {module}''' )
def finish_calibration(model : List[Any] , args : List[Any] ) -> str:
'''simple docstring'''
logger.info('Loading calibrated amax' )
for name, module in model.named_modules():
if name.endswith('_quantizer' ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax('percentile' , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
    print_quant_summary(model )
def fuse_qkv(model : str , args : int ) -> str:
    '''simple docstring'''
    def fusea(qq : int , qk : str , qv : Optional[Any] ):
        for mod in [qq, qk, qv]:
            if not hasattr(mod , '_amax' ):
                print(' WARNING: NO AMAX BUFFER' )
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()
        amax = max(q , k , v )
        qq._amax.fill_(amax )
        qk._amax.fill_(amax )
        qv._amax.fill_(amax )
        logger.info(F''' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' )
for name, mod in model.named_modules():
if name.endswith('.attention.self' ):
logger.info(F'''FUSE_QKV: {name:{name_width}}''' )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def clip_gelu(model : int , maxval : str ) -> Union[str, Any]:
    '''simple docstring'''
    for name, mod in model.named_modules():
        if name.endswith('.output.dense' ) and not name.endswith('attention.output.dense' ):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval )
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(F'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' )
def expand_amax(model : List[str] ) -> List[str]:
    '''simple docstring'''
    for name, mod in model.named_modules():
        if hasattr(mod , '_weight_quantizer' ) and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k , dtype=amax.dtype , device=amax.device ) * amax
            print(F'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' )
def recalibrate_weights(model : Dict ) -> Tuple:
    '''simple docstring'''
    for name, mod in model.named_modules():
        if hasattr(mod , '_weight_quantizer' ):
            if not hasattr(mod._weight_quantizer , '_amax' ):
                print(F'''RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER''' )
                continue
            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
            reduce_axis = set(range(len(mod.weight.size() ) ) ) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight , axis=reduce_axis , keepdims=True ).detach()
            logger.info(F'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' )
            mod._weight_quantizer._amax = amax
def print_model_summary(model : Tuple , name_width : List[str]=25 , line_width : str=180 , ignore : int=None ) -> List[Any]:
    '''simple docstring'''
    if ignore is None:
        ignore = []
    elif not isinstance(ignore , list ):
        ignore = [ignore]
    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod , 'weight' ):
            continue
        name_width = max(name_width , len(name ) )
    for name, mod in model.named_modules():
        input_q = getattr(mod , '_input_quantizer' , None )
        weight_q = getattr(mod , '_weight_quantizer' , None )
        if not hasattr(mod , 'weight' ):
            continue
        if type(mod ) in ignore:
            continue
        if [True for s in ignore if type(s ) is str and s in name]:
            continue
        act_str = F'''Act:{input_q.extra_repr()}'''
        wgt_str = F'''Wgt:{weight_q.extra_repr()}'''
        line = F'''{name:{name_width}} {act_str} {wgt_str}'''
        if len(line ) <= line_width:
            logger.info(line )
        else:
            logger.info(F'''{name:{name_width}} {act_str}''' )
            logger.info(F'''{" ":{name_width}} {wgt_str}''' )
def print_quant_summary(model : Dict ) -> int:
    '''simple docstring'''
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod , pytorch_quantization.nn.TensorQuantizer ):
print(F'''{name:80} {mod}''' )
count += 1
print(F'''{count} TensorQuantizers found in model''' )
def set_quantizer(name : str , mod : Dict , quantizer : List[Any] , k : Union[str, Any] , v : Any ) -> int:
    '''simple docstring'''
    quantizer_mod = getattr(mod , quantizer , None )
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod , k )
        setattr(quantizer_mod , k , v )
    else:
        logger.warning(F'''{name} has no {quantizer}''' )
def _snake_case ( _snake_case : Dict , _snake_case : Optional[int] , _snake_case : str="both" , **_snake_case : List[Any] ) -> str:
'''simple docstring'''
_A = F'''Warning: changing {which} quantizers of {name:{qname_width}}'''
for k, v in kwargs.items():
s += F''' {k}={v}'''
if which in ["input", "both"]:
set_quantizer(_snake_case , _snake_case , '_input_quantizer' , _snake_case , _snake_case )
if which in ["weight", "both"]:
set_quantizer(_snake_case , _snake_case , '_weight_quantizer' , _snake_case , _snake_case )
logger.info(_snake_case )
def set_quantizer_by_name(model : Any , names : int , **kwargs : Dict ) -> List[str]:
    '''simple docstring'''
    for name, mod in model.named_modules():
        if hasattr(mod , '_input_quantizer' ) or hasattr(mod , '_weight_quantizer' ):
            for n in names:
                if re.search(n , name ):
                    set_quantizers(name , mod , **kwargs )
        elif name.endswith('_quantizer' ):
            for n in names:
                if re.search(n , name ):
                    s = F'''Warning: changing {name:{name_width}}'''
                    for k, v in kwargs.items():
                        s += F''' {k}={v}'''
                        setattr(mod , k , v )
                    logger.info(s )
| 315 | 0 |
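# Hedged sketch of how the helpers above are typically driven: switch the
# quantizers into calibration mode, stream a few batches through the model,
# then freeze the collected ranges. `model`, `args` and `calib_loader` are
# placeholders, not names defined by this module.
import torch

def calibrate(model, args, calib_loader, num_batches=16):
    model.eval()
    enable_calibration(model)          # collect statistics, no fake-quant yet
    with torch.no_grad():
        for step, batch in enumerate(calib_loader):
            model(**batch)
            if step + 1 >= num_batches:
                break
    finish_calibration(model, args)    # load amax values, re-enable quant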
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word ):
    return "".join(sorted(word ) )


def anagram(my_word ):
    return word_by_signature[signature(my_word )]
_snake_case = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
_snake_case = sorted({word.strip().lower() for word in data.splitlines()})
_snake_case = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open("anagrams.txt", "w") as file:
file.write("all_anagrams = \n ")
file.write(pprint.pformat(all_anagrams))
| 26 |
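# Quick worked example of the signature/anagram idea above: words are grouped
# by their sorted letters, so "pots", "stop" and "tops" all share signature
# "opst". Self-contained, so it does not need the words.txt file.
assert "".join(sorted("tops")) == "opst"
groups = {}
for word in ["pots", "stop", "tops", "spot"]:
    groups.setdefault("".join(sorted(word)), []).append(word)
print(groups)  # {'opst': ['pots', 'stop', 'tops', 'spot']}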
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
_CITATION = r'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr( datasets.Metric ):
    '''simple docstring'''
    def _info( self : Optional[int] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , )
    def _compute( self : Dict , predictions : Optional[Any]=None , references : int=None , return_pvalue : Optional[int]=False ):
        results = spearmanr(references , predictions )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 315 | 0 |
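# A small check of the metric's behaviour straight from scipy, matching the
# docstring examples above: ranks that mostly move in opposite directions
# give a negative coefficient.
from scipy.stats import spearmanr

rho, pvalue = spearmanr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
print(round(rho, 2))     # -0.7
print(round(pvalue, 2))  # 0.19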
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
_lowercase : Union[str, Any] =logging.getLogger(__name__)
def lowerCAmelCase_ ( _lowercase : torch.nn.Module , _lowercase : BnbQuantizationConfig , _lowercase : Union[str, os.PathLike] = None , _lowercase : Optional[Dict[str, Union[int, str, torch.device]]] = None , _lowercase : Optional[List[str]] = None , _lowercase : Optional[Dict[Union[int, str], Union[int, str]]] = None , _lowercase : Optional[Union[str, os.PathLike]] = None , _lowercase : bool = False , ) -> int:
"""simple docstring"""
a__ : Tuple = bnb_quantization_config.load_in_abit
a__ : Optional[Any] = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"""You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
""" make sure you have the latest version of `bitsandbytes` installed.""")
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"""You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
"""make sure you have the latest version of `bitsandbytes` installed.""")
a__ : str = []
# custom device map
if isinstance(_snake_case , _snake_case) and len(device_map.keys()) > 1:
a__ : int = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
a__ : Optional[Any] = get_keys_to_not_convert(_snake_case)
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(_snake_case)
a__ : Any = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
a__ : str = []
a__ : List[Any] = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(_snake_case)
# compatibility with peft
a__ : Tuple = load_in_abit
a__ : List[Any] = load_in_abit
a__ : List[str] = get_parameter_device(_snake_case)
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""")
a__ : Optional[Any] = replace_with_bnb_layers(_snake_case , _snake_case , modules_to_not_convert=_snake_case)
# convert param to the right dtype
a__ : List[Any] = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules):
param.to(torch.floataa)
if param.dtype != torch.floataa:
a__ : Dict = name.replace(""".weight""" , """""").replace(""".bias""" , """""")
a__ : Dict = getattr(_snake_case , _snake_case , _snake_case)
if param is not None:
param.to(torch.floataa)
elif torch.is_floating_point(_snake_case):
param.to(_snake_case)
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device())
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device())
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""")
logger.info(
F'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
"""We move the model to cuda.""")
return model
elif weights_location is None:
raise RuntimeError(
F'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''')
else:
with init_empty_weights():
a__ : List[Any] = replace_with_bnb_layers(
_snake_case , _snake_case , modules_to_not_convert=_snake_case)
a__ : List[Any] = get_quantized_model_device_map(
_snake_case , _snake_case , _snake_case , max_memory=_snake_case , no_split_module_classes=_snake_case , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
a__ : str = True
a__ : Tuple = any(x in list(device_map.values()) for x in ["""cpu""", """disk"""])
load_checkpoint_in_model(
_snake_case , _snake_case , _snake_case , dtype=bnb_quantization_config.torch_dtype , offload_folder=_snake_case , offload_state_dict=_snake_case , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(_snake_case , device_map=_snake_case , offload_dir=_snake_case)
def lowerCAmelCase_ ( _lowercase : List[str] , _lowercase : List[str] , _lowercase : Any=None , _lowercase : Dict=None , _lowercase : int=None) -> Any:
"""simple docstring"""
if device_map is None:
if torch.cuda.is_available():
a__ : Optional[int] = {"""""": torch.cuda.current_device()}
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""")
logger.info("""The device_map was not initialized.""" """Setting device_map to `{\'\':torch.cuda.current_device()}`.""")
if isinstance(_snake_case , _snake_case):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or """
"""\'sequential\'.""")
a__ : int = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules)
})
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules)
})
a__ : Dict = {}
a__ : Optional[Any] = special_dtypes
a__ : List[str] = no_split_module_classes
a__ : Union[str, Any] = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
a__ : Dict = get_balanced_memory(
_snake_case , low_zero=(device_map == """balanced_low_0""") , max_memory=_snake_case , **_snake_case , )
a__ : Any = max_memory
a__ : Optional[Any] = infer_auto_device_map(_snake_case , **_snake_case)
if isinstance(_snake_case , _snake_case):
# check if don't have any quantized module on the cpu
a__ : List[str] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
a__ : Any = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"""\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n """)
else:
logger.info(
"""Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""")
del device_map_without_some_modules
return device_map
def lowerCAmelCase_ ( _lowercase : Optional[Any] , _lowercase : Any , _lowercase : Union[str, Any]=None , _lowercase : Tuple=None) -> Tuple:
"""simple docstring"""
if modules_to_not_convert is None:
a__ : str = []
a__ , a__ : Optional[Any] = _replace_with_bnb_layers(
_snake_case , _snake_case , _snake_case , _snake_case)
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""")
return model
def lowerCAmelCase_ ( _lowercase : int , _lowercase : str , _lowercase : str=None , _lowercase : Optional[int]=None , ) -> str:
"""simple docstring"""
a__ : Any = False
for name, module in model.named_children():
if current_key_name is None:
a__ : Union[str, Any] = []
current_key_name.append(_snake_case)
if isinstance(_snake_case , nn.Linear) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
a__ : List[Any] = """.""".join(_snake_case)
a__ : Optional[int] = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
a__ : Optional[int] = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
a__ : List[str] = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_snake_case , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
a__ : Tuple = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError("""load_in_8bit and load_in_4bit can\'t be both False""")
a__ : Optional[Any] = module.weight.data
if module.bias is not None:
a__ : Any = module.bias.data
bnb_module.requires_grad_(_snake_case)
setattr(_snake_case , _snake_case , _snake_case)
a__ : str = True
if len(list(module.children())) > 0:
a__ , a__ : int = _replace_with_bnb_layers(
_snake_case , _snake_case , _snake_case , _snake_case)
a__ : Optional[int] = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1)
return model, has_been_replaced
def lowerCAmelCase_ ( _lowercase : Any) -> Optional[int]:
"""simple docstring"""
with init_empty_weights():
a__ : Tuple = deepcopy(_snake_case) # this has 0 cost since it is done inside `init_empty_weights` context manager`
a__ : Tuple = find_tied_parameters(_snake_case)
# For compatibility with Accelerate < 0.18
if isinstance(_snake_case , _snake_case):
a__ : Union[str, Any] = sum(list(tied_params.values()) , []) + list(tied_params.keys())
else:
a__ : Optional[Any] = sum(_snake_case , [])
a__ : Dict = len(_snake_case) > 0
# Check if it is a base model
a__ : Optional[Any] = False
if hasattr(_snake_case , """base_model_prefix"""):
a__ : int = not hasattr(_snake_case , model.base_model_prefix)
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
a__ : List[str] = list(model.named_children())
a__ : Optional[Any] = [list_modules[-1][0]]
# add last module together with tied weights
a__ : Optional[int] = set(_snake_case) - set(_snake_case)
a__ : str = list(set(_snake_case)) + list(_snake_case)
# remove ".weight" from the keys
a__ : List[str] = [""".weight""", """.bias"""]
a__ : List[Any] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
a__ : Any = name.replace(_snake_case , """""")
filtered_module_names.append(_snake_case)
return filtered_module_names
def lowerCAmelCase_ ( _lowercase : Tuple) -> Tuple:
"""simple docstring"""
for m in model.modules():
if isinstance(_snake_case , bnb.nn.Linearabit):
return True
return False
def lowerCAmelCase_ ( _lowercase : nn.Module) -> Optional[Any]:
"""simple docstring"""
return next(parameter.parameters()).device
def lowerCAmelCase_ ( _lowercase : Any , _lowercase : Optional[Any] , _lowercase : int , _lowercase : Union[str, Any] , _lowercase : Any , _lowercase : Any , _lowercase : str) -> Any:
"""simple docstring"""
if fpaa_statistics is None:
set_module_tensor_to_device(_snake_case , _snake_case , 0 , dtype=_snake_case , value=_snake_case)
a__ : Any = param_name
a__ : Tuple = model
if "." in tensor_name:
a__ : List[str] = tensor_name.split(""".""")
for split in splits[:-1]:
a__ : Optional[Any] = getattr(_snake_case , _snake_case)
if new_module is None:
raise ValueError(F'''{module} has no attribute {split}.''')
a__ : List[Any] = new_module
a__ : Any = splits[-1]
# offload weights
a__ : Optional[int] = False
offload_weight(module._parameters[tensor_name] , _snake_case , _snake_case , index=_snake_case)
if hasattr(module._parameters[tensor_name] , """SCB"""):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""") , _snake_case , index=_snake_case , )
else:
offload_weight(_snake_case , _snake_case , _snake_case , index=_snake_case)
offload_weight(_snake_case , param_name.replace("""weight""" , """SCB""") , _snake_case , index=_snake_case)
set_module_tensor_to_device(_snake_case , _snake_case , """meta""" , dtype=_snake_case , value=torch.empty(*param.size()))
| 170 |
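# The snippet above lost its digits in extraction ("abit", "floataa", "fpaa"
# stand in for 4bit/8bit, float32 and fp32), so here is a hedged sketch of
# the intended 8-bit/4-bit branch using the real bitsandbytes layer classes.
# The config attribute names follow accelerate's BnbQuantizationConfig, but
# treat them as assumptions rather than a definitive implementation.
import bitsandbytes as bnb

def make_bnb_linear(module, config):
    if config.load_in_8bit:
        return bnb.nn.Linear8bitLt(
            module.in_features, module.out_features, module.bias is not None,
            has_fp16_weights=False, threshold=config.llm_int8_threshold,
        )
    if config.load_in_4bit:
        return bnb.nn.Linear4bit(
            module.in_features, module.out_features, module.bias is not None,
            config.bnb_4bit_compute_dtype,
            compress_statistics=config.bnb_4bit_use_double_quant,
            quant_type=config.bnb_4bit_quant_type,
        )
    raise ValueError("load_in_8bit and load_in_4bit can't both be False")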
"""simple docstring"""
from collections.abc import Callable
def bisection(function : Callable[[float], float] , a : float , b : float ) -> float:
    '''simple docstring'''
    start = a
    end = b
    if function(a ) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b ) == 0:
        return b
    elif (
        function(a ) * function(b ) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError('could not find root in given interval.' )
    else:
        mid = start + (end - start) / 2.0
        while abs(start - mid ) > 10**-7:  # until precisely equals to 10^-7
            if function(mid ) == 0:
                return mid
            elif function(mid ) * function(start ) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x : float ) -> float:
    '''simple docstring'''
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1_000))
import doctest
doctest.testmod()
| 315 | 0 |
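# Worked check for the bisection routine above (assumes `bisection` and `f`
# are in scope): f(x) = x**3 - 2x - 5 has a single real root near 2.0945514,
# and bisection on [1, 1000] converges to it within 1e-7.
print(round(bisection(f, 1, 1_000), 4))  # 2.0946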
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    '''simple docstring'''
    parser = ArgumentParser('Diffusers CLI tool', usage='diffusers-cli <command> [<args>]' )
    commands_parser = parser.add_subparsers(help='diffusers-cli command helpers' )
    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args, 'func' ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
main()
| 141 |
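# The register/dispatch pattern above in miniature: each command registers a
# subparser and stores a factory in `func`; `main` then instantiates and runs
# whichever command was chosen. Names here are illustrative only.
from argparse import ArgumentParser

class HelloCommand:
    @staticmethod
    def register_subcommand(subparsers):
        parser = subparsers.add_parser("hello")
        parser.set_defaults(func=lambda args: HelloCommand())

    def run(self):
        print("hello")

parser = ArgumentParser("demo-cli")
subparsers = parser.add_subparsers()
HelloCommand.register_subcommand(subparsers)
args = parser.parse_args(["hello"])
args.func(args).run()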
"""simple docstring"""
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(__lowerCAmelCase ) , '''Tatoeba directory does not exist.''' )
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
    @cached_property
    def resolver( self : Optional[Any] ):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir )

    @slow
    def test_resolver( self : Optional[int] ):
        self.resolver.convert_models(['heb-eng'] )

    @slow
    def test_model_card( self : Optional[Any] ):
        content , mmeta = self.resolver.write_model_card('opus-mt-he-en' , dry_run=True )
        assert mmeta["long_pair"] == "heb-eng"
| 315 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class _A :
_UpperCamelCase : str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
_UpperCamelCase : Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
_UpperCamelCase : Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
_UpperCamelCase : Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
_UpperCamelCase : bool = field(default=__lowerCAmelCase , metadata={'''help''': '''Whether tp freeze the encoder.'''} )
_UpperCamelCase : bool = field(default=__lowerCAmelCase , metadata={'''help''': '''Whether to freeze the embeddings.'''} )
@dataclass
class _A :
_UpperCamelCase : str = field(
metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
_UpperCamelCase : Optional[str] = field(
default='''summarization''' , metadata={'''help''': '''Task name, summarization (or summarization_{dataset} for pegasus) or translation'''} , )
_UpperCamelCase : Optional[int] = field(
default=1_0_2_4 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
_UpperCamelCase : Optional[int] = field(
default=1_2_8 , metadata={
'''help''': (
'''The maximum total sequence length for target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
_UpperCamelCase : Optional[int] = field(
default=1_4_2 , metadata={
'''help''': (
'''The maximum total sequence length for validation target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded. '''
'''This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '''
'''during ``evaluate`` and ``predict``.'''
)
} , )
_UpperCamelCase : Optional[int] = field(
default=1_4_2 , metadata={
'''help''': (
'''The maximum total sequence length for test target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
_UpperCamelCase : Optional[int] = field(default=-1 , metadata={'''help''': '''# training examples. -1 means use all.'''} )
_UpperCamelCase : Optional[int] = field(default=-1 , metadata={'''help''': '''# validation examples. -1 means use all.'''} )
_UpperCamelCase : Optional[int] = field(default=-1 , metadata={'''help''': '''# test examples. -1 means use all.'''} )
_UpperCamelCase : Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''Source language id for translation.'''} )
_UpperCamelCase : Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''Target language id for translation.'''} )
_UpperCamelCase : Optional[int] = field(default=__lowerCAmelCase , metadata={'''help''': '''# num_beams to use for evaluation.'''} )
_UpperCamelCase : bool = field(
default=__lowerCAmelCase , metadata={'''help''': '''If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'''} , )
def handle_metrics( split , metrics , output_dir ) -> str:
    '''simple docstring'''
    logger.info(F"""***** {split} metrics *****""" )
    for key in sorted(metrics.keys() ):
        logger.info(F"""  {key} = {metrics[key]}""" )
    save_json(metrics , os.path.join(output_dir , F"""{split}_results.json""" ) )
def main() -> Optional[int]:
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    check_output_dir(training_args )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , _snake_case )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    extra_model_params = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
    for p in extra_model_params:
        if getattr(training_args , p , None ):
            assert hasattr(config , p ), F"""({config.__class__.__name__}) doesn\'t have a `{p}` attribute"""
            setattr(config , p , getattr(training_args , p ) )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=config , cache_dir=model_args.cache_dir , )
    # use task specific params
    use_task_specific_params(model , data_args.task )
    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams
    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer , (MBartTokenizer, MBartTokenizerFast) ):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer , MBartTokenizer ):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
    if model_args.freeze_embeds:
        freeze_embeds(model )
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder() )
        assert_all_frozen(model.get_encoder() )
    dataset_class = SeqaSeqDataset
    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
        if training_args.do_predict
        else None
    )
    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task , tokenizer ) if training_args.predict_with_generate else None
    )
    trainer = SeqaSeqTrainer(
        model=model , args=training_args , data_args=data_args , train_dataset=train_dataset , eval_dataset=eval_dataset , data_collator=SeqaSeqDataCollator(
            tokenizer , data_args , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=compute_metrics_fn , tokenizer=tokenizer , )
    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info('''*** Train ***''' )
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        metrics = train_result.metrics
        metrics['''train_n_objs'''] = data_args.n_train
        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics('''train''' , metrics , training_args.output_dir )
            all_metrics.update(metrics )
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        metrics = trainer.evaluate(metric_key_prefix='''val''' )
        metrics['''val_n_objs'''] = data_args.n_val
        metrics['''val_loss'''] = round(metrics['''val_loss'''] , 4 )
        if trainer.is_world_process_zero():
            handle_metrics('''val''' , metrics , training_args.output_dir )
            all_metrics.update(metrics )
    if training_args.do_predict:
        logger.info('''*** Predict ***''' )
        test_output = trainer.predict(test_dataset=test_dataset , metric_key_prefix='''test''' )
        metrics = test_output.metrics
        metrics['''test_n_objs'''] = data_args.n_test
        if trainer.is_world_process_zero():
            metrics['''test_loss'''] = round(metrics['''test_loss'''] , 4 )
            handle_metrics('''test''' , metrics , training_args.output_dir )
            all_metrics.update(metrics )
            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions , skip_special_tokens=True , clean_up_tokenization_spaces=True )
                test_preds = lmap(str.strip , test_preds )
                write_txt_file(test_preds , os.path.join(training_args.output_dir , '''test_generations.txt''' ) )
    if trainer.is_world_process_zero():
        save_json(all_metrics , os.path.join(training_args.output_dir , '''all_results.json''' ) )
    return all_metrics
return all_metrics
def _mp_fn( index ) -> Optional[Any]:
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 308 |
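# Hedged sketch of what the `freeze_params`/`assert_all_frozen` utilities
# imported above typically do: switch off gradients for a module and verify
# nothing trainable remains. The real implementations live in the example's
# utils.py; this is only an approximation.
import torch.nn as nn

def freeze_params(model: nn.Module) -> None:
    for p in model.parameters():
        p.requires_grad = False

def assert_all_frozen(model: nn.Module) -> None:
    n_frozen = sum(not p.requires_grad for p in model.parameters())
    n_total = sum(1 for _ in model.parameters())
    assert n_frozen == n_total, f"{n_total - n_frozen} params are still trainable"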
"""simple docstring"""
from ...utils import OptionalDependencyNotAvailable, is_note_seq_available, is_torch_available, is_transformers_available
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
        T5FilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 315 | 0 |
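# The import-guard pattern used above, reduced to its skeleton: probe for the
# optional backend and fall back to placeholder exports when it is missing,
# so that importing the package never hard-fails. Names are illustrative.
try:
    import torch  # noqa: F401
    _torch_available = True
except ImportError:
    _torch_available = False

if _torch_available:
    print("real implementations exported")
else:
    print("dummy placeholder objects exported instead")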
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    """simple docstring"""
    def __init__( self , parent , ) -> Dict:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = """gelu"""
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs( self ) -> Tuple:
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = EsmConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder( self ) -> Optional[int]:
        '''simple docstring'''
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> str:
        '''simple docstring'''
        model = TFEsmModel(config=config )
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_model_as_decoder( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ) -> Optional[int]:
        '''simple docstring'''
        config.add_cross_attention = True
        model = TFEsmModel(config=config )
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """encoder_hidden_states""": encoder_hidden_states,
            """encoder_attention_mask""": encoder_attention_mask,
        }
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs , encoder_hidden_states=encoder_hidden_states )
        # Also check the case where encoder outputs are not passed
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_masked_lm( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> int:
        '''simple docstring'''
        model = TFEsmForMaskedLM(config=config )
        result = model([input_ids, input_mask] )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Optional[Any]:
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config )
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ) -> Any:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
UpperCamelCase : Optional[int] = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCamelCase : int = (
{
'''feature-extraction''': TFEsmModel,
'''fill-mask''': TFEsmForMaskedLM,
'''text-classification''': TFEsmForSequenceClassification,
'''token-classification''': TFEsmForTokenClassification,
'''zero-shot''': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase : int = False
UpperCamelCase : Tuple = False
    def setUp( self ) -> str:
        '''simple docstring'''
        self.model_tester = TFEsmModelTester(self )
        self.config_tester = ConfigTester(self , config_class=EsmConfig , hidden_size=37 )

    def test_config( self ) -> Optional[int]:
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_model( self ) -> int:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_model_as_decoder( self ) -> List[str]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs )

    def test_for_masked_lm( self ) -> int:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_token_classification( self ) -> Union[str, Any]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ) -> Optional[int]:
        '''simple docstring'''
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name )
            self.assertIsNotNone(model )

    @unittest.skip("""Protein models do not support embedding resizing.""" )
    def test_resize_token_embeddings( self ) -> Tuple:
        '''simple docstring'''
        pass

    @unittest.skip("""Protein models do not support embedding resizing.""" )
    def test_save_load_after_resize_token_embeddings( self ) -> List[str]:
        '''simple docstring'''
        pass

    def test_model_common_attributes( self ) -> Tuple:
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name , dict )
                for k, v in name.items():
                    assert isinstance(v , tf.Variable )
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest( unittest.TestCase ):
    """simple docstring"""

    @slow
    def test_inference_masked_lm( self ) -> Dict:
        '''simple docstring'''
        model = TFEsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape ) , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )

    @slow
    def test_inference_no_head( self ) -> List[Any]:
        '''simple docstring'''
        model = TFEsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
        output = model(input_ids )[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 252 |
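# The integration tests above pin a 3x3 corner of the model output and
# compare it against hard-coded values with an absolute tolerance; the same
# check in miniature with plain numpy.
import numpy as np

output = np.array([[8.92, -10.59, -6.47]])
expected = np.array([[8.921518, -10.589814, -6.4671307]])
assert np.allclose(output, expected, atol=1e-2)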
"""simple docstring"""
from __future__ import annotations
def get_valid_pos(position : tuple[int, int] , n : int ) -> list[tuple[int, int]]:
    '''simple docstring'''
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position )
    return permissible_positions
def is_complete(board : list[list[int]] ) -> bool:
    '''simple docstring'''
    return not any(elem == 0 for row in board for elem in row )
def open_knight_tour_helper(board : list[list[int]] , pos : tuple[int, int] , curr : int ) -> bool:
    '''simple docstring'''
    if is_complete(board ):
        return True
    for position in get_valid_pos(pos , len(board ) ):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board , position , curr + 1 ):
                return True
            board[y][x] = 0
    return False
def open_knight_tour(n : int ) -> list[list[int]]:
    '''simple docstring'''
    board = [[0 for i in range(n )] for j in range(n )]
    for i in range(n ):
        for j in range(n ):
            board[i][j] = 1
            if open_knight_tour_helper(board , (i, j) , 1 ):
                return board
            board[i][j] = 0
    msg = F'''Open Kight Tour cannot be performed on a board of size {n}'''
    raise ValueError(msg )
if __name__ == "__main__":
import doctest
doctest.testmod()
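    # Hedged demo (added): print one open knight's tour on a 5x5 board; an open
    # tour exists for n=5, so this should not raise.
    for row in open_knight_tour(5):
        print(row)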
| 315 | 0 |
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    '''simple docstring'''
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        # For backward compatibility, forward to the target processor inside the
        # `as_target_processor` context manager.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")
        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        '''Forward all arguments to the tokenizer's batch_decode.'''
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        '''Forward all arguments to the tokenizer's decode.'''
        return self.tokenizer.decode(*args, **kwargs)
    @contextmanager
    def as_target_processor(self):
        '''Temporarily set the tokenizer as the processor used in __call__.'''
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        '''Convert a (generated) token sequence into an ordered JSON format.'''
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()
        output = {}
        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]
                tokens = tokens[tokens.find(end_token) + len(end_token):].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)
        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
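# Hedged illustration (added, not in the original file): given a processor
# instance, `token2json` turns a Donut-style tagged sequence into JSON, e.g.
#   processor.token2json("<s_menu><s_name>Latte</s_name><s_price>4.50</s_price></s_menu>")
#   # -> {"menu": {"name": "Latte", "price": "4.50"}}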
| 10 |
"""simple docstring"""
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    '''simple docstring'''
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node writes the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node logs the results by default
            self.log(metrics)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
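# Hedged usage sketch (added): wiring this trainer into a SQuAD-style script,
# assuming `model`, `training_args`, the datasets, and a `post_processing_function`
# that maps start/end logits back to answer strings already exist:
#   trainer = QuestionAnsweringTrainer(
#       model=model,
#       args=training_args,
#       train_dataset=train_dataset,
#       eval_dataset=eval_dataset,
#       eval_examples=eval_examples,
#       post_process_function=post_processing_function,
#       compute_metrics=compute_metrics,
#   )
#   metrics = trainer.evaluate()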
| 315 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"moussaKam/mbarthez": 1024,
"moussaKam/barthez": 1024,
"moussaKam/barthez-orangesum-title": 1024,
}
SPIECE_UNDERLINE = "▁"
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
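# Hedged usage sketch (added):
#   tok = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
#   tok("Le camembert est délicieux !")["input_ids"]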
| 146 |
"""simple docstring"""
def different_signs(num1: int, num2: int) -> bool:
    '''Return True iff num1 and num2 have opposite signs (function and parameter
    names reconstructed): XOR of two ints is negative exactly when their sign
    bits differ.
    >>> different_signs(1, -1)
    True
    >>> different_signs(1, 1)
    False
    >>> different_signs(-1, -1)
    False
    '''
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
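    # Hedged demo (added): XOR mixes the sign bits, so the result is negative
    # exactly when the operands' signs differ.
    print(different_signs(3, -7))   # True
    print(different_signs(-3, -7))  # False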
| 315 | 0 |
"""simple docstring"""
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"
# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    '''Find one (or multiple) backend in a code line of the init.'''
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    '''Read an init file and parse (per backend) the `_import_structure` objects and `TYPE_CHECKING` objects defined.'''
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1
    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1
    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    '''Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init.'''
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]
    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    '''Check all inits in the repo and raise an error if one does not define the same objects in both halves.'''
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    '''Return the list of Transformers submodules.'''
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def check_submodules():
    '''Check every submodule is registered in the keys of `_import_structure` of the main init.'''
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import
    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
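    # Hedged smoke check (added): `find_backend` pulls the backend name out of
    # a conditional-import guard line.
    assert find_backend("    if not is_torch_available():") == "torch"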
| 108 |
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class LinearAlgebraTestCase(unittest.TestCase):
    '''simple docstring'''
    def test_component(self):
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()
    def test_str(self):
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")
    def test_size(self):
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)
    def test_euclidean_length(self):
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)
    def test_add(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)
    def test_sub(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)
    def test_mul(self):
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)
    def test_zero_vector(self):
        self.assertEqual(str(zero_vector(10)).count("0"), 10)
    def test_unit_basis_vector(self):
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")
    def test_axpy(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")
    def test_copy(self):
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))
    def test_change_component(self):
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")
    def test_str_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))
    def test_minor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))
    def test_cofactor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))
    def test_determinant(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())
    def test__mul__matrix(self):
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))
    def test_change_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))
    def test_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)
    def test__add__matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))
    def test__sub__matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))
    def test_square_zero_matrix(self):
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n", str(square_zero_matrix(5))
        )
if __name__ == "__main__":
unittest.main()
| 315 | 0 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    """simple docstring"""
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class LDMSuperResolutionPipeline(DiffusionPipeline):
    """simple docstring"""
    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"""`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}""")
        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)
        height, width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype
        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        image = image.to(device=self.device, dtype=latents_dtype)
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
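# Hedged usage sketch (added); the model id is the commonly published
# super-resolution checkpoint, adjust as needed:
#   pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#   low_res = PIL.Image.open("low_res.png").convert("RGB").resize((128, 128))
#   upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
#   upscaled.save("upscaled.png")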
| 130 |
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''',
    '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''',
}
class XLNetConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = '''xlnet'''
    keys_to_ignore_at_inference = ['''mems''']
    attribute_map = {
        '''n_token''': '''vocab_size''',  # Backward compatibility
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }
    def __init__(
        self,
        vocab_size=32_000,
        d_model=1_024,
        n_layer=24,
        n_head=16,
        d_inner=4_096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(F'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''')
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    F'''`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})''')
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        if "use_cache" in kwargs:
            warnings.warn(
                'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'
                ' instead.',
                FutureWarning,
            )
            use_mems_eval = kwargs['use_cache']
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''')
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            F'''The model {self.model_type} is one of the few models that has no sequence length limit.''')
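if __name__ == "__main__":
    # Hedged smoke check (added): `num_attention_heads` is mapped onto `n_head`
    # by the attribute_map above.
    config = XLNetConfig(n_layer=2, n_head=4, d_model=128)
    print(config.num_attention_heads)  # 4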
| 315 | 0 |
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Transpose the rows of source_data into per-attribute columns."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists
def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Min-max score each attribute column; weight 0 means lower is better, weight 1 means higher is better."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = F"""Invalid weight of {weight:f} provided"""
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists
def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Add up each row's attribute scores."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores
def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Append a weighted min-max proximity score to every row of source_data."""
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
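if __name__ == "__main__":
    # Hedged demo (added): two attributes per row; weight 0 = lower is better,
    # weight 1 = higher is better. The proximity score is appended in place.
    vehicles = [[20, 60], [23, 90], [22, 50]]
    print(procentual_proximity(vehicles, [0, 1]))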
| 52 |
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
'''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'''bert''': (BertConfig, BertForMaskedLM, BertTokenizer),
'''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks(args) -> None:
    '''Run a bunch of sanity checks on the argument combinations before distillation starts.'''
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args) -> None:
    '''Freeze the student's positional embeddings so they are not updated during distillation.'''
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings(student, args) -> None:
    '''Freeze the student's token type embeddings (when the architecture has them).'''
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main() -> None:
    '''simple docstring'''
    parser = argparse.ArgumentParser(description='Training')
    parser.add_argument('--force', action='store_true', help='Overwrite dump_path if it already exists.')
    parser.add_argument(
        '--dump_path', type=str, required=True, help='The output directory (log, checkpoints, parameters, etc.)')
    parser.add_argument(
        '--data_file', type=str, required=True, help='The binarized file (tokenized + tokens_to_ids) and grouped by sequence.', )
    parser.add_argument(
        '--student_type', type=str, choices=['distilbert', 'roberta', 'gpt2'], required=True, help='The student type (DistilBERT, RoBERTa).', )
    parser.add_argument('--student_config', type=str, required=True, help='Path to the student configuration.')
    parser.add_argument(
        '--student_pretrained_weights', default=None, type=str, help='Load student initialization checkpoint.')
    parser.add_argument(
        '--teacher_type', choices=['bert', 'roberta', 'gpt2'], required=True, help='Teacher type (BERT, RoBERTa).')
    parser.add_argument('--teacher_name', type=str, required=True, help='The teacher model.')
    parser.add_argument('--temperature', default=2.0, type=float, help='Temperature for the softmax temperature.')
    parser.add_argument(
        '--alpha_ce', default=0.5, type=float, help='Linear weight for the distillation loss. Must be >=0.')
    parser.add_argument(
        '--alpha_mlm', default=0.0, type=float, help='Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.', )
    parser.add_argument('--alpha_clm', default=0.5, type=float, help='Linear weight for the CLM loss. Must be >=0.')
    parser.add_argument('--alpha_mse', default=0.0, type=float, help='Linear weight of the MSE loss. Must be >=0.')
    parser.add_argument(
        '--alpha_cos', default=0.0, type=float, help='Linear weight of the cosine embedding loss. Must be >=0.')
    parser.add_argument(
        '--mlm', action='store_true', help='The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.')
    parser.add_argument(
        '--mlm_mask_prop', default=0.15, type=float, help='Proportion of tokens for which we need to make a prediction.', )
    parser.add_argument('--word_mask', default=0.8, type=float, help='Proportion of tokens to mask out.')
    parser.add_argument('--word_keep', default=0.1, type=float, help='Proportion of tokens to keep.')
    parser.add_argument('--word_rand', default=0.1, type=float, help='Proportion of tokens to randomly replace.')
    parser.add_argument(
        '--mlm_smoothing', default=0.7, type=float, help='Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).', )
    parser.add_argument('--token_counts', type=str, help='The token counts in the data_file for MLM.')
    parser.add_argument(
        '--restrict_ce_to_mask', action='store_true', help='If true, compute the distillation loss only the [MLM] prediction distribution.', )
    parser.add_argument(
        '--freeze_pos_embs', action='store_true', help='Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.', )
    parser.add_argument(
        '--freeze_token_type_embds', action='store_true', help='Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.', )
    parser.add_argument('--n_epoch', type=int, default=3, help='Number of pass on the whole dataset.')
    parser.add_argument('--batch_size', type=int, default=5, help='Batch size (for each process).')
    parser.add_argument(
        '--group_by_size', action='store_false', help='If true, group sequences that have similar length into the same batch. Default is true.', )
    parser.add_argument(
        '--gradient_accumulation_steps', type=int, default=50, help='Gradient accumulation for larger training batches.', )
    parser.add_argument('--warmup_prop', default=0.05, type=float, help='Linear warmup proportion.')
    parser.add_argument('--weight_decay', default=0.0, type=float, help='Weight decay if we apply some.')
    parser.add_argument('--learning_rate', default=5e-4, type=float, help='The initial learning rate for Adam.')
    parser.add_argument('--adam_epsilon', default=1e-6, type=float, help='Epsilon for Adam optimizer.')
    parser.add_argument('--max_grad_norm', default=5.0, type=float, help='Max gradient norm.')
    parser.add_argument('--initializer_range', default=0.02, type=float, help='Random initialization range.')
    parser.add_argument(
        '--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit', )
    parser.add_argument(
        '--fp16_opt_level', type=str, default='O1', help=(
            'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
            'See details at https://nvidia.github.io/apex/amp.html'
        ) , )
    parser.add_argument('--n_gpu', type=int, default=1, help='Number of GPUs in the node.')
    parser.add_argument('--local_rank', type=int, default=-1, help='Distributed training - Local rank')
    parser.add_argument('--seed', type=int, default=56, help='Random seed')
    parser.add_argument('--log_interval', type=int, default=5_00, help='Tensorboard logging interval.')
    parser.add_argument('--checkpoint_interval', type=int, default=40_00, help='Checkpoint interval.')
    args = parser.parse_args()
    sanity_checks(args)
# ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to'''
                    ' overwrite it. Use `--force` if you want to overwrite it.')
            else:
                shutil.rmtree(args.dump_path)
        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''')
        # SAVE PARAMS #
        logger.info(F'''Param: {args}''')
        with open(os.path.join(args.dump_path, 'parameters.json'), 'w') as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(F'''Special tokens {special_tok_ids}''')
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
    # DATA LOADER #
    logger.info(F'''Loading data from {args.data_file}''')
    with open(args.data_file, 'rb') as fp:
        data = pickle.load(fp)
    if args.mlm:
        logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''')
        with open(args.token_counts, 'rb') as fp:
            counts = pickle.load(fp)
        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None
    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info('Data loader created.')
    # STUDENT #
    logger.info(F'''Loading student config from {args.student_config}''')
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True
    if args.student_pretrained_weights is not None:
        logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''')
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)
    if args.n_gpu > 0:
        student.to(F'''cuda:{args.local_rank}''')
    logger.info('Student loaded.')
    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(F'''cuda:{args.local_rank}''')
    logger.info(F'''Teacher loaded from {args.teacher_name}.''')
    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)
    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size
    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher)
    distiller.train()
    logger.info('Let\'s go get some drinks.')
if __name__ == "__main__":
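    # Hedged example invocation (added); all paths are placeholders:
    #   python train.py --force --dump_path out/ \
    #       --data_file data.pkl --token_counts counts.pkl --mlm \
    #       --student_type distilbert --student_config student_cfg.json \
    #       --teacher_type bert --teacher_name bert-base-uncased \
    #       --alpha_ce 5.0 --alpha_mlm 2.0 --alpha_clm 0.0 --alpha_cos 1.0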
main()
| 315 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 7_68))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]])
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1E-3))
    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 10_24))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0_699, -0.0_318, 0.0_705, -0.1_241, 0.0_999, -0.0_520, 0.1_004, -0.1_838, -0.4_704, 0.1_437, 0.0_821, 0.0_126]])
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1E-3))
| 243 |
"""simple docstring"""
from manim import *
class lowercase_ ( Scene ):
    '''simple docstring'''
    def construct( self : Dict ):
_A = Rectangle(height=0.5 , width=0.5 )
_A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_A = Rectangle(height=0.25 , width=0.25 )
_A = [mem.copy() for i in range(6 )]
_A = [mem.copy() for i in range(6 )]
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = Text('CPU' , font_size=24 )
_A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_UpperCAmelCase )
_A = [mem.copy() for i in range(4 )]
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = Text('GPU' , font_size=24 )
_A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(_UpperCAmelCase )
_A = [mem.copy() for i in range(6 )]
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = Text('Model' , font_size=24 )
_A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(_UpperCAmelCase )
_A = []
_A = []
for i, rect in enumerate(_UpperCAmelCase ):
_A = fill.copy().set_fill(_UpperCAmelCase , opacity=0.8 )
target.move_to(_UpperCAmelCase )
model_arr.append(_UpperCAmelCase )
_A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_UpperCAmelCase , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(_UpperCAmelCase )
self.add(*_UpperCAmelCase , *_UpperCAmelCase )
_A = [meta_mem.copy() for i in range(6 )]
_A = [meta_mem.copy() for i in range(6 )]
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = Text('Disk' , font_size=24 )
_A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
disk.move_to([-4, -1.25, 0] )
self.add(_UpperCAmelCase , _UpperCAmelCase )
_A = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_A = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_UpperCAmelCase , _UpperCAmelCase )
_A = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(_UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_UpperCAmelCase )
_A = MarkupText(
F'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase ) )
_A = Square(0.3 )
input.set_fill(_UpperCAmelCase , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , _UpperCAmelCase , buff=0.5 )
self.play(Write(_UpperCAmelCase ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=_UpperCAmelCase , buff=0.02 )
self.play(MoveToTarget(_UpperCAmelCase ) )
self.play(FadeOut(_UpperCAmelCase ) )
_A = Arrow(start=_UpperCAmelCase , end=_UpperCAmelCase , color=_UpperCAmelCase , buff=0.5 )
a.next_to(model_arr[0].get_left() , _UpperCAmelCase , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
_A = MarkupText(
F'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase , run_time=3 ) )
_A = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(_UpperCAmelCase ) , Circumscribe(model_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
_A = a.copy()
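        # Walk the input across the model: for each layer the weights are pulled from CPU to GPU just in
        # time, used for the forward step, then offloaded back while the next layer is prefetched.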
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , _UpperCAmelCase , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
_A = AnimationGroup(
FadeOut(_UpperCAmelCase , run_time=0.5 ) , MoveToTarget(_UpperCAmelCase , run_time=0.5 ) , FadeIn(_UpperCAmelCase , run_time=0.5 ) , lag_ratio=0.2 )
self.play(_UpperCAmelCase )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
_A = 0.7
self.play(
Circumscribe(model_arr[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_arr[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
_A = a_c
_A = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(_UpperCAmelCase ) , FadeOut(_UpperCAmelCase , run_time=0.5 ) , )
_A = MarkupText(F'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase , run_time=3 ) , MoveToTarget(_UpperCAmelCase ) )
self.wait()
| 315 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=2 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , _a=0 , ) -> int:
_A : Tuple = parent
_A : List[str] = batch_size
_A : Any = seq_length
_A : Dict = is_training
_A : int = use_input_mask
_A : Dict = use_token_type_ids
_A : Union[str, Any] = use_labels
_A : Union[str, Any] = vocab_size
_A : Union[str, Any] = hidden_size
_A : List[Any] = num_hidden_layers
_A : Dict = num_attention_heads
_A : int = intermediate_size
_A : Dict = hidden_act
_A : Dict = hidden_dropout_prob
_A : Dict = attention_probs_dropout_prob
_A : Union[str, Any] = max_position_embeddings
_A : List[Any] = type_vocab_size
_A : Tuple = type_sequence_label_size
_A : List[Any] = initializer_range
_A : str = num_labels
_A : Optional[Any] = num_choices
_A : Tuple = scope
_A : int = projection_dim
def a__ ( self ) -> Optional[Any]:
_A : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A : str = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
_A : int = random_attention_mask([self.batch_size, self.seq_length] )
_A : Union[str, Any] = None
if self.use_token_type_ids:
_A : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_A : Dict = None
_A : Tuple = None
_A : Optional[int] = None
if self.use_labels:
_A : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
_A : Union[str, Any] = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
_A : Any = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> str:
_A : List[str] = TFDPRContextEncoder(config=_UpperCAmelCase )
_A : Union[str, Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
_A : List[Any] = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
_A : str = model(_UpperCAmelCase )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> Tuple:
_A : Any = TFDPRQuestionEncoder(config=_UpperCAmelCase )
_A : List[str] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
_A : Union[str, Any] = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
_A : List[Any] = model(_UpperCAmelCase )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> str:
_A : str = TFDPRReader(config=_UpperCAmelCase )
_A : Optional[int] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def a__ ( self ) -> List[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
_a = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
_a = {'''feature-extraction''': TFDPRQuestionEncoder} if is_tf_available() else {}
_a = False
_a = False
_a = False
_a = False
_a = False
def a__ ( self ) -> Any:
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)
def a__ ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def a__ ( self ) -> Any:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)
def a__ ( self ) -> List[str]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)
def a__ ( self ) -> Any:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)
@slow
def a__ ( self ) -> Optional[Any]:
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)
        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)
        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
@slow
def a__ ( self ) -> int:
        model = TFDPRQuestionEncoder.from_pretrained("""facebook/dpr-question_encoder-single-nq-base""")
        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]])  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
[
[
0.03236253,
0.12753335,
0.16818509,
0.00279786,
0.3896933,
0.24264945,
0.2178971,
-0.02335227,
-0.08481959,
-0.14324117,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 26 |
"""simple docstring"""
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort: each pass floats the largest remaining element to
    the end, and the recursion stops early once a pass makes no swaps."""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i + 1], list_data[i] = list_data[i], list_data[i + 1]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 315 | 0 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main() -> None:
    """Run the TensorFlow benchmark, translating deprecated `--no_*` flags into an actionable error."""
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
| 170 |
"""simple docstring"""
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
a = logging.get_logger(__name__)
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : Any = ['''input_values''', '''attention_mask''']
def __init__( self : Dict , _UpperCAmelCase : int = 1 , _UpperCAmelCase : int = 16_000 , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : bool = False , _UpperCAmelCase : int = 80 , _UpperCAmelCase : int = 16 , _UpperCAmelCase : int = 64 , _UpperCAmelCase : str = "hann_window" , _UpperCAmelCase : float = 1.0 , _UpperCAmelCase : float = 80 , _UpperCAmelCase : float = 7_600 , _UpperCAmelCase : float = 1E-1_0 , _UpperCAmelCase : int = 2 , _UpperCAmelCase : bool = True , **_UpperCAmelCase : List[Any] , ):
super().__init__(feature_size=_UpperCAmelCase , sampling_rate=_UpperCAmelCase , padding_value=_UpperCAmelCase , **_UpperCAmelCase )
_A = do_normalize
_A = return_attention_mask
_A = num_mel_bins
_A = hop_length
_A = win_length
_A = win_function
_A = frame_signal_scale
_A = fmin
_A = fmax
_A = mel_floor
_A = reduction_factor
_A = win_length * sampling_rate // 1_000
_A = hop_length * sampling_rate // 1_000
_A = optimal_fft_length(self.sample_size )
_A = (self.n_fft // 2) + 1
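        # The analysis window and slaney-style mel filter bank below are precomputed once at init
        # time, so each __call__ only has to run the STFT per utterance.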
_A = window_function(window_length=self.sample_size , name=self.win_function , periodic=_UpperCAmelCase )
_A = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='slaney' , mel_scale='slaney' , )
if frame_signal_scale != 1.0:
warnings.warn(
'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers' , _UpperCAmelCase , )
if reduction_factor != 2.0:
warnings.warn(
'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers' , _UpperCAmelCase , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def lowerCAmelCase_ ( _UpperCAmelCase : List[np.ndarray] , _UpperCAmelCase : List[np.ndarray] , _UpperCAmelCase : float = 0.0 ):
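        # Normalize each sequence using statistics from its un-padded region only; padded positions
        # are then reset to padding_value so they stay inert downstream.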
if attention_mask is not None:
_A = np.array(_UpperCAmelCase , np.intaa )
_A = []
for vector, length in zip(_UpperCAmelCase , attention_mask.sum(-1 ) ):
_A = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
_A = padding_value
normed_input_values.append(_UpperCAmelCase )
else:
_A = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : np.ndarray , ):
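        # Compute a log10-mel spectrogram and transpose it so the frame (time) axis comes first.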
_A = spectrogram(
_UpperCAmelCase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='log10' , )
return log_mel_spec.T
def __call__( self : int , _UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : Optional[int] = None , **_UpperCAmelCase : Optional[int] , ):
if audio is None and audio_target is None:
raise ValueError('You must provide either `audio` or `audio_target` values.' )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
if audio is not None:
_A = self._process_audio(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , )
else:
_A = None
if audio_target is not None:
_A = self._process_audio(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , )
if inputs is None:
return inputs_target
else:
_A = inputs_target['input_values']
_A = inputs_target.get('attention_mask' )
if decoder_attention_mask is not None:
_A = decoder_attention_mask
return inputs
def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _UpperCAmelCase : bool = False , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , **_UpperCAmelCase : List[Any] , ):
_A = isinstance(_UpperCAmelCase , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
_A = is_batched_numpy or (
isinstance(_UpperCAmelCase , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_A = [np.asarray(_UpperCAmelCase , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(_UpperCAmelCase , np.ndarray ):
_A = np.asarray(_UpperCAmelCase , dtype=np.floataa )
elif isinstance(_UpperCAmelCase , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
_A = speech.astype(np.floataa )
# always return batch
if not is_batched:
_A = [speech]
# needed to make pad() work on spectrogram inputs
_A = self.feature_size
# convert into correct format for padding
if is_target:
_A = [self._extract_mel_features(_UpperCAmelCase ) for waveform in speech]
_A = BatchFeature({'input_values': features} )
_A = self.num_mel_bins
else:
_A = BatchFeature({'input_values': speech} )
_A = self.pad(
_UpperCAmelCase , padding=_UpperCAmelCase , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , **_UpperCAmelCase , )
_A = feature_size_hack
# convert input values to correct format
_A = padded_inputs['input_values']
if not isinstance(input_values[0] , np.ndarray ):
_A = [np.asarray(_UpperCAmelCase , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(_UpperCAmelCase , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
_A = [array.astype(np.floataa ) for array in input_values]
elif isinstance(_UpperCAmelCase , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
_A = input_values.astype(np.floataa )
# convert attention_mask to correct format
_A = padded_inputs.get('attention_mask' )
if attention_mask is not None:
_A = [np.asarray(_UpperCAmelCase , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
_A = (
attention_mask
if self._get_padding_strategies(_UpperCAmelCase , max_length=_UpperCAmelCase ) is not PaddingStrategy.DO_NOT_PAD
else None
)
_A = self.zero_mean_unit_var_norm(
padded_inputs['input_values'] , attention_mask=_UpperCAmelCase , padding_value=self.padding_value )
if return_tensors is not None:
_A = padded_inputs.convert_to_tensors(_UpperCAmelCase )
return padded_inputs
def lowerCAmelCase_ ( self : Any ):
_A = super().to_dict()
# Don't serialize these as they are derived from the other properties.
_A = ['window', 'mel_filters', 'sample_size', 'sample_stride', 'n_fft', 'n_freqs']
for name in names:
if name in output:
del output[name]
return output
| 315 | 0 |
'''simple docstring'''
__author__ = '''Tobias Carryer'''
from time import time
class LinearCongruentialGenerator:
    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        """Store the a (multiplier), c (increment) and m (modulo) parameters plus the initial seed."""
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        """Advance the generator one step: seed = (multiplier * seed + increment) mod modulo."""
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
if __name__ == "__main__":
# Show the LCG in action.
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
while True:
print(lcg.next_number())
| 141 |
"""simple docstring"""
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """Return every k-combination of the integers 1..n."""
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    # Backtracking: append each candidate, recurse for the remaining slots, then pop to restore state.
    if level == 0:
        total_list.append(current_list[:])
        return
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
| 315 | 0 |
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    '''simple docstring'''
    if subparsers is not None:
        parser = subparsers.add_parser('''env''')
    else:
        parser = argparse.ArgumentParser('''Accelerate env command''')

    parser.add_argument(
        '''--config_file''', default=None, help='''The config file to use for the default values in the launching script.''')

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser
def env_command(args):
    '''simple docstring'''
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = '''Not found'''
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        '''`Accelerate` version''': version,
        '''Platform''': platform.platform(),
        '''Python version''': platform.python_version(),
        '''Numpy version''': np.__version__,
        '''PyTorch version (GPU?)''': F"""{pt_version} ({pt_cuda_available})""",
        '''PyTorch XPU available''': str(pt_xpu_available),
        '''PyTorch NPU available''': str(pt_npu_available),
        '''System RAM''': F"""{psutil.virtual_memory().total / 1024 ** 3:.2f} GB""",
    }
    if pt_cuda_available:
        info['''GPU type'''] = torch.cuda.get_device_name()

    print('''\nCopy-and-paste the text below in your GitHub issue\n''')
    print('''\n'''.join([F"""- {prop}: {val}""" for prop, val in info.items()]))

    print('''- `Accelerate` default config:''' if args.config_file is None else '''- `Accelerate` config passed:''')
    accelerate_config_str = (
        '''\n'''.join([F"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else F"""\t{accelerate_config}"""
    )
    print(accelerate_config_str)

    info['''`Accelerate` configs'''] = accelerate_config
    return info
def main() -> int:
    '''simple docstring'''
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
if __name__ == "__main__":
raise SystemExit(main())
| 308 |
"""simple docstring"""
def solution(n: int = 1000) -> int:
    '''simple docstring'''
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
| 315 | 0 |
from collections.abc import Callable
import numpy as np
def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Heun's method (explicit trapezoidal rule): predict with an Euler step,
    then correct using the average of the slopes at both ends of the interval."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # Predictor: a plain Euler step.
        y_predict = y[k] + step_size * ode_func(x, y[k])
        # Corrector: trapezoidal average of the slopes at x and x + step_size.
        y[k + 1] = y[k] + (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        x += step_size

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| 252 |
"""simple docstring"""
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class lowercase_ ( nn.Module ):
'''simple docstring'''
UpperCAmelCase : int
UpperCAmelCase : int
UpperCAmelCase : float = 0.0
UpperCAmelCase : int = 1
UpperCAmelCase : int = 1
UpperCAmelCase : bool = True
UpperCAmelCase : bool = False
UpperCAmelCase : bool = False
UpperCAmelCase : bool = False
UpperCAmelCase : jnp.dtype = jnp.floataa
def lowerCAmelCase_ ( self : List[str] ):
_A = []
_A = []
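        # Each layer pairs a ResNet block with a cross-attention transformer block; an optional
        # downsampler halves the spatial resolution at the end of the block.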
for i in range(self.num_layers ):
_A = self.in_channels if i == 0 else self.out_channels
_A = FlaxResnetBlockaD(
in_channels=_UpperCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_UpperCAmelCase )
_A = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(_UpperCAmelCase )
_A = resnets
_A = attentions
if self.add_downsample:
_A = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple=True ):
_A = ()
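        # Accumulate every intermediate hidden state; the matching up blocks consume these as skip connections.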
for resnet, attn in zip(self.resnets , self.attentions ):
_A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase )
_A = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase )
output_states += (hidden_states,)
if self.add_downsample:
_A = self.downsamplers_a(_UpperCAmelCase )
output_states += (hidden_states,)
return hidden_states, output_states
class lowercase_ ( nn.Module ):
'''simple docstring'''
UpperCAmelCase : int
UpperCAmelCase : int
UpperCAmelCase : float = 0.0
UpperCAmelCase : int = 1
UpperCAmelCase : bool = True
UpperCAmelCase : jnp.dtype = jnp.floataa
def lowerCAmelCase_ ( self : List[Any] ):
_A = []
for i in range(self.num_layers ):
_A = self.in_channels if i == 0 else self.out_channels
_A = FlaxResnetBlockaD(
in_channels=_UpperCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_UpperCAmelCase )
_A = resnets
if self.add_downsample:
_A = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : List[str] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : List[str]=True ):
_A = ()
for resnet in self.resnets:
_A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase )
output_states += (hidden_states,)
if self.add_downsample:
_A = self.downsamplers_a(_UpperCAmelCase )
output_states += (hidden_states,)
return hidden_states, output_states
class lowercase_ ( nn.Module ):
'''simple docstring'''
UpperCAmelCase : int
UpperCAmelCase : int
UpperCAmelCase : int
UpperCAmelCase : float = 0.0
UpperCAmelCase : int = 1
UpperCAmelCase : int = 1
UpperCAmelCase : bool = True
UpperCAmelCase : bool = False
UpperCAmelCase : bool = False
UpperCAmelCase : bool = False
UpperCAmelCase : jnp.dtype = jnp.floataa
def lowerCAmelCase_ ( self : Any ):
_A = []
_A = []
for i in range(self.num_layers ):
_A = self.in_channels if (i == self.num_layers - 1) else self.out_channels
_A = self.prev_output_channel if i == 0 else self.out_channels
_A = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_UpperCAmelCase )
_A = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(_UpperCAmelCase )
_A = resnets
_A = attentions
if self.add_upsample:
_A = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any]=True ):
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
_A = res_hidden_states_tuple[-1]
_A = res_hidden_states_tuple[:-1]
_A = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
_A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase )
_A = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase )
if self.add_upsample:
_A = self.upsamplers_a(_UpperCAmelCase )
return hidden_states
class lowercase_ ( nn.Module ):
'''simple docstring'''
UpperCAmelCase : int
UpperCAmelCase : int
UpperCAmelCase : int
UpperCAmelCase : float = 0.0
UpperCAmelCase : int = 1
UpperCAmelCase : bool = True
UpperCAmelCase : jnp.dtype = jnp.floataa
def lowerCAmelCase_ ( self : Any ):
_A = []
for i in range(self.num_layers ):
_A = self.in_channels if (i == self.num_layers - 1) else self.out_channels
_A = self.prev_output_channel if i == 0 else self.out_channels
_A = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_UpperCAmelCase )
_A = resnets
if self.add_upsample:
_A = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : int , _UpperCAmelCase : int , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[int]=True ):
for resnet in self.resnets:
# pop res hidden states
_A = res_hidden_states_tuple[-1]
_A = res_hidden_states_tuple[:-1]
_A = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
_A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase )
if self.add_upsample:
_A = self.upsamplers_a(_UpperCAmelCase )
return hidden_states
class lowercase_ ( nn.Module ):
'''simple docstring'''
UpperCAmelCase : int
UpperCAmelCase : float = 0.0
UpperCAmelCase : int = 1
UpperCAmelCase : int = 1
UpperCAmelCase : bool = False
UpperCAmelCase : bool = False
UpperCAmelCase : jnp.dtype = jnp.floataa
def lowerCAmelCase_ ( self : Dict ):
# there is always at least one resnet
_A = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
_A = []
for _ in range(self.num_layers ):
_A = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(_UpperCAmelCase )
_A = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_UpperCAmelCase )
_A = resnets
_A = attentions
def __call__( self : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int]=True ):
_A = self.resnets[0](_UpperCAmelCase , _UpperCAmelCase )
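        # Mid block: after the first resnet, alternate (attention, resnet) pairs at the lowest resolution.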
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
_A = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase )
_A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase )
return hidden_states
| 315 | 0 |
from ...configuration_utils import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}
class NezhaConfig(PretrainedConfig):
'''simple docstring'''
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = '''nezha'''
    def __init__(self, vocab_size=21_128, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act='''gelu''', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, max_relative_position=64, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, classifier_dropout=0.1, pad_token_id=0, bos_token_id=2, eos_token_id=3, use_cache=True, **kwargs):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
| 10 |
"""simple docstring"""
import numpy
class TwoHiddenLayerNeuralNetwork:
'''simple docstring'''
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray):
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4)
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)
    def feedforward(self) -> numpy.ndarray:
        # layer_between_input_and_first_hidden_layer is the layer connecting
        # the input nodes with the first hidden layer nodes.
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights))
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            ))
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            ))
        return self.layer_between_second_hidden_layer_and_output
    def back_propagation(self) -> None:
        # Gradients flow from the output layer back toward the input layer (chain rule).
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            # Forward pass refreshes the cached predictions, backward pass updates the weights.
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(F'''Iteration {iteration} Loss: {loss}''')
    def predict(self, input_arr: numpy.ndarray) -> int:
        # Run a forward pass on the new input and threshold the final activation at 0.6.
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights))
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            ))
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            ))
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    """Logistic sigmoid, applied element-wise."""
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    """Derivative of the sigmoid, expressed in terms of the sigmoid's output."""
    return (value) * (1 - (value))
def example() -> int:
    """Train the network on example truth-table data, then predict one input."""
    # Input values.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example()
| 315 | 0 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : List[Any] = OrderedDict(
[
("align", "EfficientNetImageProcessor"),
("beit", "BeitImageProcessor"),
("bit", "BitImageProcessor"),
("blip", "BlipImageProcessor"),
("blip-2", "BlipImageProcessor"),
("bridgetower", "BridgeTowerImageProcessor"),
("chinese_clip", "ChineseCLIPImageProcessor"),
("clip", "CLIPImageProcessor"),
("clipseg", "ViTImageProcessor"),
("conditional_detr", "ConditionalDetrImageProcessor"),
("convnext", "ConvNextImageProcessor"),
("convnextv2", "ConvNextImageProcessor"),
("cvt", "ConvNextImageProcessor"),
("data2vec-vision", "BeitImageProcessor"),
("deformable_detr", "DeformableDetrImageProcessor"),
("deit", "DeiTImageProcessor"),
("deta", "DetaImageProcessor"),
("detr", "DetrImageProcessor"),
("dinat", "ViTImageProcessor"),
("donut-swin", "DonutImageProcessor"),
("dpt", "DPTImageProcessor"),
("efficientformer", "EfficientFormerImageProcessor"),
("efficientnet", "EfficientNetImageProcessor"),
("flava", "FlavaImageProcessor"),
("focalnet", "BitImageProcessor"),
("git", "CLIPImageProcessor"),
("glpn", "GLPNImageProcessor"),
("groupvit", "CLIPImageProcessor"),
("imagegpt", "ImageGPTImageProcessor"),
("instructblip", "BlipImageProcessor"),
("layoutlmv2", "LayoutLMv2ImageProcessor"),
("layoutlmv3", "LayoutLMv3ImageProcessor"),
("levit", "LevitImageProcessor"),
("mask2former", "Mask2FormerImageProcessor"),
("maskformer", "MaskFormerImageProcessor"),
("mgp-str", "ViTImageProcessor"),
("mobilenet_v1", "MobileNetV1ImageProcessor"),
("mobilenet_v2", "MobileNetV2ImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevitv2", "MobileViTImageProcessor"),
("nat", "ViTImageProcessor"),
("oneformer", "OneFormerImageProcessor"),
("owlvit", "OwlViTImageProcessor"),
("perceiver", "PerceiverImageProcessor"),
("pix2struct", "Pix2StructImageProcessor"),
("poolformer", "PoolFormerImageProcessor"),
("regnet", "ConvNextImageProcessor"),
("resnet", "ConvNextImageProcessor"),
("sam", "SamImageProcessor"),
("segformer", "SegformerImageProcessor"),
("swiftformer", "ViTImageProcessor"),
("swin", "ViTImageProcessor"),
("swin2sr", "Swin2SRImageProcessor"),
("swinv2", "ViTImageProcessor"),
("table-transformer", "DetrImageProcessor"),
("timesformer", "VideoMAEImageProcessor"),
("tvlt", "TvltImageProcessor"),
("upernet", "SegformerImageProcessor"),
("van", "ConvNextImageProcessor"),
("videomae", "VideoMAEImageProcessor"),
("vilt", "ViltImageProcessor"),
("vit", "ViTImageProcessor"),
("vit_hybrid", "ViTHybridImageProcessor"),
("vit_mae", "ViTImageProcessor"),
("vit_msn", "ViTImageProcessor"),
("xclip", "CLIPImageProcessor"),
("yolos", "YolosImageProcessor"),
]
)
__UpperCamelCase : Optional[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def _a ( SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
UpperCamelCase__ : List[str] = model_type_to_module_name(_snake_case )
UpperCamelCase__ : Any = importlib.import_module(F".{module_name}" , '''transformers.models''' )
try:
return getattr(_snake_case , _snake_case )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(_snake_case , '''__name__''' , _snake_case ) == class_name:
return extractor
# We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
UpperCamelCase__ : Optional[Any] = importlib.import_module('''transformers''' )
if hasattr(_snake_case , _snake_case ):
return getattr(_snake_case , _snake_case )
return None
def _a ( SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , SCREAMING_SNAKE_CASE : Optional[Union[str, os.PathLike]] = None , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : Optional[Dict[str, str]] = None , SCREAMING_SNAKE_CASE : Optional[Union[bool, str]] = None , SCREAMING_SNAKE_CASE : Optional[str] = None , SCREAMING_SNAKE_CASE : bool = False , **SCREAMING_SNAKE_CASE : Tuple , ):
"""simple docstring"""
UpperCamelCase__ : Optional[int] = get_file_from_repo(
_snake_case , _snake_case , cache_dir=_snake_case , force_download=_snake_case , resume_download=_snake_case , proxies=_snake_case , use_auth_token=_snake_case , revision=_snake_case , local_files_only=_snake_case , )
if resolved_config_file is None:
logger.info(
'''Could not locate the image processor configuration file, will try to use the model config instead.''' )
return {}
with open(_snake_case , encoding='''utf-8''' ) as reader:
return json.load(_snake_case )
class __magic_name__ :
def __init__( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
raise EnvironmentError(
'''AutoImageProcessor is designed to be instantiated '''
'''using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
@replace_list_option_in_docstrings(_UpperCAmelCase )
def UpperCAmelCase__ ( cls : Any , lowerCamelCase__ : str , **lowerCamelCase__ : Any ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ : int = kwargs.pop('''config''' , _UpperCAmelCase )
UpperCamelCase__ : Optional[Any] = kwargs.pop('''trust_remote_code''' , _UpperCAmelCase )
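        # Resolution order below: an explicit image_processor_type in the processor config, an
        # auto_map entry, a legacy feature-extractor config, the model config, and finally the
        # model-type mapping.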
UpperCamelCase__ : Optional[Any] = True
UpperCamelCase__ , UpperCamelCase__ : Tuple = ImageProcessingMixin.get_image_processor_dict(_UpperCAmelCase , **_UpperCAmelCase )
UpperCamelCase__ : Optional[Any] = config_dict.get('''image_processor_type''' , _UpperCAmelCase )
UpperCamelCase__ : List[Any] = None
if "AutoImageProcessor" in config_dict.get('''auto_map''' , {} ):
UpperCamelCase__ : Dict = config_dict['''auto_map''']['''AutoImageProcessor''']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
UpperCamelCase__ : Dict = config_dict.pop('''feature_extractor_type''' , _UpperCAmelCase )
if feature_extractor_class is not None:
logger.warning(
'''Could not find image processor class in the image processor config or the model config. Loading'''
''' based on pattern matching with the model\'s feature extractor configuration.''' )
UpperCamelCase__ : List[Any] = feature_extractor_class.replace('''FeatureExtractor''' , '''ImageProcessor''' )
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
UpperCamelCase__ : Optional[Any] = config_dict['''auto_map''']['''AutoFeatureExtractor''']
UpperCamelCase__ : int = feature_extractor_auto_map.replace('''FeatureExtractor''' , '''ImageProcessor''' )
logger.warning(
'''Could not find image processor auto map in the image processor config or the model config.'''
''' Loading based on pattern matching with the model\'s feature extractor configuration.''' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
UpperCamelCase__ : Optional[int] = AutoConfig.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
# It could be in `config.image_processor_type``
UpperCamelCase__ : List[str] = getattr(_UpperCAmelCase , '''image_processor_type''' , _UpperCAmelCase )
if hasattr(_UpperCAmelCase , '''auto_map''' ) and "AutoImageProcessor" in config.auto_map:
UpperCamelCase__ : Optional[Any] = config.auto_map['''AutoImageProcessor''']
if image_processor_class is not None:
UpperCamelCase__ : Optional[int] = image_processor_class_from_name(_UpperCAmelCase )
UpperCamelCase__ : Optional[int] = image_processor_auto_map is not None
UpperCamelCase__ : Optional[Any] = image_processor_class is not None or type(_UpperCAmelCase ) in IMAGE_PROCESSOR_MAPPING
UpperCamelCase__ : Dict = resolve_trust_remote_code(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if has_remote_code and trust_remote_code:
UpperCamelCase__ : List[str] = get_class_from_dynamic_module(
_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase )
UpperCamelCase__ : Optional[int] = kwargs.pop('''code_revision''' , _UpperCAmelCase )
if os.path.isdir(_UpperCAmelCase ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
elif image_processor_class is not None:
return image_processor_class.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(_UpperCAmelCase ) in IMAGE_PROCESSOR_MAPPING:
UpperCamelCase__ : Optional[Any] = IMAGE_PROCESSOR_MAPPING[type(_UpperCAmelCase )]
return image_processor_class.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
raise ValueError(
F"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
F"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
F"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}" )
@staticmethod
def UpperCAmelCase__ ( lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any] ) -> str:
'''simple docstring'''
IMAGE_PROCESSOR_MAPPING.register(_UpperCAmelCase , _UpperCAmelCase )
| 146 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
a = TypeVar('''T''')
class lowercase_ ( Generic[T] ):
'''simple docstring'''
def __init__( self : Any , _UpperCAmelCase : T ):
_A = data
_A = None
def __str__( self : str ):
return F'''{self.data}'''
class lowercase_ ( Generic[T] ):
'''simple docstring'''
def __init__( self : Tuple ):
_A = None
def __iter__( self : List[Any] ):
_A = self.top
while node:
yield node.data
_A = node.next
def __str__( self : Union[str, Any] ):
return "->".join([str(_UpperCAmelCase ) for item in self] )
def __len__( self : List[Any] ):
return len(tuple(iter(self ) ) )
def lowerCAmelCase_ ( self : str ):
return self.top is None
def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : T ):
_A = Node(_UpperCAmelCase )
if not self.is_empty():
_A = self.top
_A = node
def lowerCAmelCase_ ( self : Dict ):
if self.is_empty():
raise IndexError('pop from empty stack' )
assert isinstance(self.top , _UpperCAmelCase )
_A = self.top
_A = self.top.next
return pop_node.data
def lowerCAmelCase_ ( self : Tuple ):
if self.is_empty():
raise IndexError('peek from empty stack' )
assert self.top is not None
return self.top.data
def lowerCAmelCase_ ( self : Optional[Any] ):
_A = None
if __name__ == "__main__":
from doctest import testmod
testmod()
| 315 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=99 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=16 , snake_case__=2 , snake_case__=0.02 , snake_case__=4 , ):
"""simple docstring"""
lowerCAmelCase : Dict = parent
lowerCAmelCase : Dict = batch_size
lowerCAmelCase : Union[str, Any] = seq_length
lowerCAmelCase : str = is_training
lowerCAmelCase : Optional[int] = use_attention_mask
lowerCAmelCase : List[Any] = use_token_type_ids
lowerCAmelCase : Union[str, Any] = use_labels
lowerCAmelCase : Optional[int] = vocab_size
lowerCAmelCase : List[Any] = hidden_size
lowerCAmelCase : Tuple = num_hidden_layers
lowerCAmelCase : int = num_attention_heads
lowerCAmelCase : Tuple = intermediate_size
lowerCAmelCase : List[str] = hidden_act
lowerCAmelCase : str = hidden_dropout_prob
lowerCAmelCase : Tuple = attention_probs_dropout_prob
lowerCAmelCase : Dict = max_position_embeddings
lowerCAmelCase : Optional[int] = type_vocab_size
lowerCAmelCase : Dict = type_sequence_label_size
lowerCAmelCase : Any = initializer_range
lowerCAmelCase : Any = num_choices
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : Dict = None
if self.use_attention_mask:
lowerCAmelCase : int = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase : str = None
if self.use_token_type_ids:
lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase : str = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowercase__ ( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
def lowercase__ ( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
"""simple docstring"""
a : List[str] =True
a : Dict =(
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = FlaxRobertaPreLayerNormModelTester(self )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
lowerCAmelCase : List[Any] = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=_UpperCAmelCase )
lowerCAmelCase : str = model(np.ones((1, 1) ) )
self.assertIsNotNone(_UpperCAmelCase )
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=_UpperCAmelCase )
lowerCAmelCase : Optional[int] = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa )
lowerCAmelCase : Any = model(_UpperCAmelCase )[0]
lowerCAmelCase : Dict = [1, 11, 50_265]
self.assertEqual(list(output.shape ) , _UpperCAmelCase )
# compare the actual values for a slice.
lowerCAmelCase : Any = np.array(
[[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4 ) )
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=_UpperCAmelCase )
lowerCAmelCase : Union[str, Any] = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa )
lowerCAmelCase : Any = model(_UpperCAmelCase )[0]
# compare the actual values for a slice.
lowerCAmelCase : Optional[Any] = np.array(
[[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4 ) )
| 108 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
a = logging.get_logger(__name__)
class ImageGPTFeatureExtractor( ImageGPTImageProcessor ):
    '''simple docstring'''
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use ImageGPTImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
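# Instantiating ImageGPTFeatureExtractor still works but emits the FutureWarning above;
# new code should construct ImageGPTImageProcessor directly.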
| 315 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_mobilebert''': [
'''MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileBertConfig''',
'''MobileBertOnnxConfig''',
],
'''tokenization_mobilebert''': ['''MobileBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mobilebert_fast'] = ['''MobileBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mobilebert'] = [
'''MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileBertForMaskedLM''',
'''MobileBertForMultipleChoice''',
'''MobileBertForNextSentencePrediction''',
'''MobileBertForPreTraining''',
'''MobileBertForQuestionAnswering''',
'''MobileBertForSequenceClassification''',
'''MobileBertForTokenClassification''',
'''MobileBertLayer''',
'''MobileBertModel''',
'''MobileBertPreTrainedModel''',
'''load_tf_weights_in_mobilebert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mobilebert'] = [
'''TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileBertForMaskedLM''',
'''TFMobileBertForMultipleChoice''',
'''TFMobileBertForNextSentencePrediction''',
'''TFMobileBertForPreTraining''',
'''TFMobileBertForQuestionAnswering''',
'''TFMobileBertForSequenceClassification''',
'''TFMobileBertForTokenClassification''',
'''TFMobileBertMainLayer''',
'''TFMobileBertModel''',
'''TFMobileBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 130 |
"""simple docstring"""
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature( word : str ) -> str:
    '''simple docstring'''
    return "".join(sorted(word ) )
def anagram( my_word : str ) -> list[str]:
    '''simple docstring'''
    return word_by_signature[signature(my_word )]
data = Path(__file__).parent.joinpath('''words.txt''').read_text(encoding='''utf-8''')
word_list = sorted({word.strip().lower() for word in data.splitlines()})
word_by_signature = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
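# Example: signature("stop") == signature("pots") == "opst", so "stop", "pots", "tops",
# "opts" and "spot" all land in the same bucket of word_by_signature.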
if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('''anagrams.txt''', '''w''') as file:
file.write('''all_anagrams = \n ''')
file.write(pprint.pformat(all_anagrams))
| 315 | 0 |
import qiskit
def half_adder( bita : int , bitb : int ) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator" )
    qc_ha = qiskit.QuantumCircuit(4 , 2 )
    # encode inputs in qubits 0 and 1
    if bita == 1:
        qc_ha.x(0 )
    if bitb == 1:
        qc_ha.x(1 )
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0 , 2 )
    qc_ha.cx(1 , 2 )
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0 , 1 , 3 )
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2 , 0 ) # extract XOR value (sum bit)
    qc_ha.measure(3 , 1 ) # extract AND value (carry bit)
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha , simulator , shots=1000 )
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha )
if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"""Half Adder Output Qubit Counts: {counts}""")
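# For inputs (1, 1): sum = 1 XOR 1 = 0 and carry = 1 AND 1 = 1, so an ideal run of this
# (fully classical) circuit yields counts of {'10': 1000} (classical bit 1 is the carry,
# classical bit 0 is the sum).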
| 52 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
@dataclass
class lowercase_ :
'''simple docstring'''
UpperCAmelCase : Optional[str] = field(
default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
UpperCAmelCase : Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
UpperCAmelCase : Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''The column name of the images in the files.'''} )
UpperCAmelCase : Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''A folder containing the training data.'''} )
UpperCAmelCase : Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''A folder containing the validation data.'''} )
UpperCAmelCase : Optional[float] = field(
default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} )
UpperCAmelCase : Optional[int] = field(
default=__lowerCAmelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
UpperCAmelCase : Optional[int] = field(
default=__lowerCAmelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
    def __post_init__( self ):
        data_files = {}
        if self.train_dir is not None:
            data_files['train'] = self.train_dir
        if self.validation_dir is not None:
            data_files['validation'] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class lowercase_ :
'''simple docstring'''
UpperCAmelCase : str = field(
default=__lowerCAmelCase , metadata={
'''help''': (
'''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'''
)
} , )
UpperCAmelCase : Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name_or_path'''} )
UpperCAmelCase : Optional[str] = field(
default=__lowerCAmelCase , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
UpperCAmelCase : Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} )
UpperCAmelCase : str = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
UpperCAmelCase : str = field(default=__lowerCAmelCase , metadata={'''help''': '''Name or path of preprocessor config.'''} )
UpperCAmelCase : bool = field(
default=__lowerCAmelCase , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
UpperCAmelCase : float = field(
default=0.75 , metadata={'''help''': '''The ratio of the number of masked tokens in the input sequence.'''} )
UpperCAmelCase : bool = field(
default=__lowerCAmelCase , metadata={'''help''': '''Whether or not to train with normalized pixel values as target.'''} )
@dataclass
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : float = field(
default=1E-3 , metadata={'''help''': '''Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'''} )
def collate_fn( examples ):
    '''simple docstring'''
    pixel_values = torch.stack([example['pixel_values'] for example in examples] )
    return {"pixel_values": pixel_values}
def main():
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_mae' , model_args , data_args )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if 'validation' in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
        split = ds['train'].train_test_split(data_args.train_val_split )
        ds['train'] = split['train']
        ds['validation'] = split['test']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        'cache_dir': model_args.cache_dir,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name , **config_kwargs )
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        config = ViTMAEConfig()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(F'''New config: {config}''' )
# adapt config
config.update(
{
'mask_ratio': model_args.mask_ratio,
'norm_pix_loss': model_args.norm_pix_loss,
} )
# create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **config_kwargs )
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        image_processor = ViTImageProcessor()
    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info('Training new model from scratch' )
        model = ViTMAEForPreTraining(config )
    if training_args.do_train:
        column_names = ds['train'].column_names
    else:
        column_names = ds['validation'].column_names
    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = 'image'
    elif "img" in column_names:
        image_column_name = 'img'
    else:
        image_column_name = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
_A = image_processor.size['shortest_edge']
else:
_A = (image_processor.size['height'], image_processor.size['width'])
_A = Compose(
[
Lambda(lambda _snake_case : img.convert('RGB' ) if img.mode != "RGB" else img ),
RandomResizedCrop(_snake_case , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(_snake_case : List[Any] ):
_A = [transforms(_snake_case ) for image in examples[image_column_name]]
return examples
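    # datasets' set_transform (used below) applies preprocess_images lazily at example
    # access time, so images are decoded and augmented only when a batch is built.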
if training_args.do_train:
if "train" not in ds:
raise ValueError('--do_train requires a train dataset' )
        if data_args.max_train_samples is not None:
            ds['train'] = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
        # Set the training transforms
        ds["train"].set_transform(preprocess_images )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('--do_eval requires a validation dataset' )
        if data_args.max_eval_samples is not None:
            ds['validation'] = (
                ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images )
# Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256
# Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=image_processor , data_collator=collate_fn , )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()
        trainer.log_metrics('train' , train_result.metrics )
        trainer.save_metrics('train' , train_result.metrics )
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('eval' , metrics )
        trainer.save_metrics('eval' , metrics )
# Write model card and (optionally) push to hub
    kwargs = {
        'tasks': 'masked-auto-encoding',
        'dataset': data_args.dataset_name,
        'tags': ['masked-auto-encoding'],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
def _mp_fn( index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 315 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_mgp_str': ['MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MgpstrConfig'],
'processing_mgp_str': ['MgpstrProcessor'],
'tokenization_mgp_str': ['MgpstrTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mgp_str'] = [
'MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST',
'MgpstrModel',
'MgpstrPreTrainedModel',
'MgpstrForSceneTextRecognition',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 243 |
"""simple docstring"""
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)
name_width = 50 # max width of layer names
qname_width = 70 # max width of quantizer names
def add_arguments( parser ):
    '''simple docstring'''
    group = parser.add_argument_group('quant_trainer arguments' )
    group.add_argument('--wprec' , type=int , default=8 , help='weight precision' )
    group.add_argument('--aprec' , type=int , default=8 , help='activation precision' )
    group.add_argument('--quant-per-tensor' , action='store_true' , help='per tensor weight scaling' )
    group.add_argument('--quant-disable' , action='store_true' , help='disable all quantizers' )
    group.add_argument('--quant-disable-embeddings' , action='store_true' , help='disable all embeddings quantizers' )
    group.add_argument('--quant-disable-keyword' , type=str , nargs='+' , help='disable quantizers by keyword' )
    group.add_argument('--quant-disable-layer-module' , type=str , help='disable quantizers by keyword under layer.' )
    group.add_argument('--quant-enable-layer-module' , type=str , help='enable quantizers by keyword under layer' )
    group.add_argument('--calibrator' , default='max' , help='which quantization range calibrator to use' )
    group.add_argument('--percentile' , default=None , type=float , help='percentile for PercentileCalibrator' )
    group.add_argument('--fuse-qkv' , action='store_true' , help='use the same scale factor for qkv' )
    group.add_argument('--clip-gelu' , metavar='N' , type=float , help='clip gelu output maximum value to N' )
    group.add_argument(
        '--recalibrate-weights' , action='store_true' , help=(
            'recalibrate weight amaxes by taking the max of the weights.'
            ' amaxes will be computed with the current quantization granularity (axis).'
        ) , )
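# Typical wiring, sketched under the usual argparse flow (no specific driver script implied):
#   parser = argparse.ArgumentParser()
#   add_arguments(parser)
#   args = parser.parse_args()
#   set_default_quantizers(args)
#   configure_model(model, args)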
def set_default_quantizers( args ):
    '''simple docstring'''
    if args.calibrator == "max":
        calib_method = 'max'
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError('Specify --percentile when using percentile calibrator' )
        calib_method = 'histogram'
    elif args.calibrator == "mse":
        calib_method = 'histogram'
    else:
        raise ValueError(F'''Invalid calibrator {args.calibrator}''' )
    input_desc = QuantDescriptor(num_bits=args.aprec , calib_method=calib_method )
    weight_desc = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc )
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc )
def configure_model( model , args , calib=False , eval=False ):
    '''simple docstring'''
    logger.info('Configuring Model for Quantization' )
    logger.info(F'''using quantization package {pytorch_quantization.__file__}''' )
    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model , ['embeddings'] , which='weight' , _disabled=True )
        if args.quant_disable:
            set_quantizer_by_name(model , [''] , _disabled=True )
        if args.quant_disable_keyword:
            set_quantizer_by_name(model , args.quant_disable_keyword , _disabled=True )
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model , [R'layer.\d+.' + args.quant_disable_layer_module] , _disabled=True )
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model , [R'layer.\d+.' + args.quant_enable_layer_module] , _disabled=False )
        if args.recalibrate_weights:
            recalibrate_weights(model )
        if args.fuse_qkv:
            fuse_qkv(model , args )
    if args.clip_gelu:
        clip_gelu(model , args.clip_gelu )
    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model )
def enable_calibration( model ):
'''simple docstring'''
logger.info('Enabling Calibration' )
for name, module in model.named_modules():
if name.endswith('_quantizer' ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F'''{name:80}: {module}''' )
def finish_calibration( model , args ):
'''simple docstring'''
logger.info('Loading calibrated amax' )
for name, module in model.named_modules():
if name.endswith('_quantizer' ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax('percentile' , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
    print_quant_summary(model )
def fuse_qkv( model , args ):
    '''simple docstring'''
    def fusea(qq , qk , qv ):
        for mod in [qq, qk, qv]:
            if not hasattr(mod , '_amax' ):
                print('          WARNING: NO AMAX BUFFER' )
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()
        amax = max(q , k , v )
        qq._amax.fill_(amax )
        qk._amax.fill_(amax )
        qv._amax.fill_(amax )
        logger.info(F'''          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' )
for name, mod in model.named_modules():
if name.endswith('.attention.self' ):
logger.info(F'''FUSE_QKV: {name:{name_width}}''' )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
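# Rationale: sharing one amax across the Q/K/V quantizers lets a fused QKV projection
# use a single scale factor at inference time instead of three separate ones.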
def clip_gelu( model , maxval ):
    '''simple docstring'''
    for name, mod in model.named_modules():
        if name.endswith('.output.dense' ) and not name.endswith('attention.output.dense' ):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval )
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(F'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' )
def expand_amax( model ):
    '''simple docstring'''
    for name, mod in model.named_modules():
        if hasattr(mod , '_weight_quantizer' ) and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k , dtype=amax.dtype , device=amax.device ) * amax
            print(F'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' )
def recalibrate_weights( model ):
    '''simple docstring'''
    for name, mod in model.named_modules():
        if hasattr(mod , '_weight_quantizer' ):
            if not hasattr(mod._weight_quantizer , '_amax' ):
                print(F'''RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER''' )
                continue
            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
            reduce_axis = set(range(len(mod.weight.size() ) ) ) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight , axis=reduce_axis , keepdims=True ).detach()
            logger.info(F'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' )
            mod._weight_quantizer._amax = amax
def print_model_summary( model , name_width=25 , line_width=1_80 , ignore=None ):
    '''simple docstring'''
    if ignore is None:
        ignore = []
    elif not isinstance(ignore , list ):
        ignore = [ignore]
    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod , 'weight' ):
            continue
        name_width = max(name_width , len(name ) )
    for name, mod in model.named_modules():
        input_q = getattr(mod , '_input_quantizer' , None )
        weight_q = getattr(mod , '_weight_quantizer' , None )
        if not hasattr(mod , 'weight' ):
            continue
        if type(mod ) in ignore:
            continue
        if [True for s in ignore if type(s ) is str and s in name]:
            continue
        act_str = F'''Act:{input_q.extra_repr()}'''
        wgt_str = F'''Wgt:{weight_q.extra_repr()}'''
        s = F'''{name:{name_width}} {act_str} {wgt_str}'''
        if len(s ) <= line_width:
            logger.info(s )
        else:
            logger.info(F'''{name:{name_width}} {act_str}''' )
            logger.info(F'''{" ":{name_width}} {wgt_str}''' )
def print_quant_summary( model ):
    '''simple docstring'''
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod , pytorch_quantization.nn.TensorQuantizer ):
print(F'''{name:80} {mod}''' )
count += 1
print(F'''{count} TensorQuantizers found in model''' )
def set_quantizer( name , mod , quantizer , k , v ):
    '''simple docstring'''
    quantizer_mod = getattr(mod , quantizer , None )
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod , k )
        setattr(quantizer_mod , k , v )
    else:
        logger.warning(F'''{name} has no {quantizer}''' )
def set_quantizers( name , mod , which="both" , **kwargs ):
    '''simple docstring'''
    s = F'''Warning: changing {which} quantizers of {name:{qname_width}}'''
    for k, v in kwargs.items():
        s += F''' {k}={v}'''
        if which in ["input", "both"]:
            set_quantizer(name , mod , '_input_quantizer' , k , v )
        if which in ["weight", "both"]:
            set_quantizer(name , mod , '_weight_quantizer' , k , v )
    logger.info(s )
def set_quantizer_by_name( model , names , **kwargs ):
    '''simple docstring'''
    for name, mod in model.named_modules():
        if hasattr(mod , '_input_quantizer' ) or hasattr(mod , '_weight_quantizer' ):
            for n in names:
                if re.search(n , name ):
                    set_quantizers(name , mod , **kwargs )
        elif name.endswith('_quantizer' ):
            for n in names:
                if re.search(n , name ):
                    s = F'''Warning: changing {name:{name_width}}'''
                    for k, v in kwargs.items():
                        s += F''' {k}={v}'''
                        setattr(mod , k , v )
                    logger.info(s )
| 315 | 0 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
),
},
"spm_file": {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
)
},
}
MAX_MODEL_INPUT_SIZES = {
"facebook/s2t-small-librispeech-asr": 1024,
}
_snake_case = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]
_snake_case = {"mustc": MUSTC_LANGS}
class Speech2TextTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    prefix_tokens: List[int] = []
    def __init__( self , vocab_file , spm_file , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , unk_token="<unk>" , do_upper_case=False , do_lower_case=False , tgt_lang=None , lang_codes=None , sp_model_kwargs = None , **kwargs , ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , do_upper_case=do_upper_case , do_lower_case=do_lower_case , tgt_lang=tgt_lang , lang_codes=lang_codes , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case
        self.encoder = load_json(vocab_file )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file , self.sp_model_kwargs )
        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [F'''<lang:{lang}>''' for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(F'''<lang:{lang}>''' ) for lang in self.langs}
            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang )
        else:
            self.lang_code_to_id = {}
    @property
    def vocab_size( self ) -> int:
        return len(self.encoder )
    @property
    def tgt_lang( self ) -> str:
        return self._tgt_lang
    @tgt_lang.setter
    def tgt_lang( self , new_tgt_lang ) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang )
    def set_tgt_lang_special_tokens( self , tgt_lang ) -> None:
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]
    def _tokenize( self , text ):
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        return self.encoder.get(token , self.encoder[self.unk_token] )
    def _convert_id_to_token( self , index ):
        return self.decoder.get(index , self.unk_token )
    def convert_tokens_to_string( self , tokens ) -> str:
        current_sub_tokens = []
        out_string = """"""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens )
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        decoded = self.sp_model.decode(current_sub_tokens )
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_b + [self.eos_token_id]
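    # Example: with lang_codes="mustc" and tgt_lang="fr", prefix_tokens holds the id of
    # "<lang:fr>", so a single sequence is encoded as [lang_code] + tokens + [eos].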
    def get_special_tokens_mask( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1]
        if token_ids_b is None:
            return prefix_ones + ([0] * len(token_ids_a )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_a )) + ([0] * len(token_ids_b )) + suffix_ones
    def get_vocab( self ) -> dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ) -> Dict:
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__( self , d ) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file , self.sp_model_kwargs )
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        save_dir = Path(save_directory )
        assert save_dir.is_dir(), F'''{save_directory} should be a directory'''
        vocab_save_path = save_dir / (
            (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""]
        )
        spm_save_path = save_dir / (
            (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""]
        )
        save_json(self.encoder , vocab_save_path )
        if os.path.abspath(self.spm_file ) != os.path.abspath(spm_save_path ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , spm_save_path )
        elif not os.path.isfile(self.spm_file ):
            with open(spm_save_path , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (str(vocab_save_path ), str(spm_save_path ))
def load_spm( path , sp_model_kwargs ) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json( path ) -> Union[Dict, List]:
    with open(path , """r""" ) as f:
        return json.load(f )
def save_json( data , path ) -> None:
    with open(path , """w""" ) as f:
        json.dump(data , f , indent=2 )
| 26 |
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
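For n observations without ties, the coefficient can also be computed directly as
rho = 1 - 6 * sum(d_i^2) / (n * (n^2 - 1)), where d_i is the difference between the
ranks of the i-th pair of observations.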
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
_CITATION = r'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr( datasets.Metric ):
    '''simple docstring'''
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , )
    def _compute( self , predictions , references , return_pvalue=False ):
        results = spearmanr(references , predictions )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 315 | 0 |
from types import MethodType
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
def decimal_to_bits( x , bits = BITS ) -> torch.Tensor:
    """expects image tensor in [0, 1], returns a bit tensor in {-1, 1}"""
    device = x.device
    x = (x * 255).int().clamp(0 , 255)
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device)
    mask = rearrange(mask , """d -> d 1 1""")
    x = rearrange(x , """b c h w -> b c 1 h w""")
    bits = ((x & mask) != 0).float()
    bits = rearrange(bits , """b c d h w -> b (c d) h w""")
    bits = bits * 2 - 1
    return bits
def bits_to_decimal( x , bits = BITS ) -> torch.Tensor:
    """expects bit tensor in {-1, 1}, returns an image tensor in [0, 1]"""
    device = x.device
    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device , dtype=torch.int32)
    mask = rearrange(mask , """d -> d 1 1""")
    x = rearrange(x , """b (c d) h w -> b c d h w""" , d=8)
    dec = reduce(x * mask , """b c d h w -> b c h w""" , """sum""")
    return (dec / 255).clamp(0.0 , 1.0)
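# Round trip: for x in [0, 1], bits_to_decimal(decimal_to_bits(x)) returns the 8-bit
# quantized value (x * 255).int() / 255, so the encode/decode pair is exact up to that
# 1/255 quantization step.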
def ddim_bit_scheduler_step( self , model_output : torch.FloatTensor , timestep : int , sample : torch.FloatTensor , eta : float = 0.0 , use_clipped_model_output : bool = True , generator=None , return_dict : bool = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
    """simple docstring"""
    if self.num_inference_steps is None:
        raise ValueError(
            """Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""")
    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"
    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps
    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t
    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample , -scale , scale)
    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep , prev_timestep)
    std_dev_t = eta * variance ** 0.5
    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else """cpu"""
        noise = torch.randn(model_output.shape , dtype=model_output.dtype , generator=generator).to(device)
        variance = self._get_variance(timestep , prev_timestep) ** 0.5 * eta * noise
        prev_sample = prev_sample + variance
    if not return_dict:
        return (prev_sample,)
    return DDIMSchedulerOutput(prev_sample=prev_sample , pred_original_sample=pred_original_sample)
def ddpm_bit_scheduler_step( self , model_output : torch.FloatTensor , timestep : int , sample : torch.FloatTensor , prediction_type : str = "epsilon" , generator=None , return_dict : bool = True , ) -> Union[DDPMSchedulerOutput, Tuple]:
    """simple docstring"""
    t = timestep
    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output , predicted_variance = torch.split(model_output , sample.shape[1] , dim=1)
    else:
        predicted_variance = None
    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev
    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(F'''Unsupported prediction_type {prediction_type}.''')
    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample , -scale , scale)
    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=generator).to(model_output.device)
        variance = (self._get_variance(t , predicted_variance=predicted_variance) ** 0.5) * noise
    pred_prev_sample = pred_prev_sample + variance
    if not return_dict:
        return (pred_prev_sample,)
    return DDPMSchedulerOutput(prev_sample=pred_prev_sample , pred_original_sample=pred_original_sample)
class BitDiffusion(DiffusionPipeline ):
    """simple docstring"""
    def __init__( self , unet : UNet2DConditionModel , scheduler : Union[DDIMScheduler, DDPMScheduler] , bit_scale : Optional[float] = 1.0 , ) -> None:
        """simple docstring"""
        super().__init__()
        self.bit_scale = bit_scale
        # Bind the chosen step function to the scheduler instance (MethodType) and expose
        # bit_scale on it, since the step functions above read `self.bit_scale` with the
        # scheduler as `self`. The explicit binding is our fix for the bare-function
        # assignment this snippet originally performed.
        scheduler.bit_scale = bit_scale
        scheduler.step = MethodType(
            ddim_bit_scheduler_step if isinstance(scheduler , DDIMScheduler ) else ddpm_bit_scheduler_step , scheduler )
        self.register_modules(unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__( self , height : Optional[int] = 256 , width : Optional[int] = 256 , num_inference_steps : Optional[int] = 50 , generator : Optional[torch.Generator] = None , batch_size : Optional[int] = 1 , output_type : Optional[str] = "pil" , return_dict : bool = True , **kwargs , ) -> Union[Tuple, ImagePipelineOutput]:
        """simple docstring"""
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width) , generator=generator , )
        latents = decimal_to_bits(latents ) * self.bit_scale
        latents = latents.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # predict the noise residual
            noise_pred = self.unet(latents , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents ).prev_sample
        image = bits_to_decimal(latents )
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
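# Usage sketch (assumed, no particular checkpoint implied):
#   pipe = BitDiffusion(unet=trained_unet, scheduler=DDIMScheduler())
#   images = pipe(height=256, width=256, num_inference_steps=50).images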
| 170 |
"""simple docstring"""
from collections.abc import Callable
def bisection( function : Callable[[float], float] , a : float , b : float ) -> float:
    '''simple docstring'''
    start = a
    end = b
    if function(a ) == 0: # one of the a or b is a root for the function
        return a
    elif function(b ) == 0:
        return b
    elif (
        function(a ) * function(b ) > 0
    ): # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError('could not find root in given interval.' )
    else:
        mid = start + (end - start) / 2.0
        while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7
            if function(mid ) == 0:
                return mid
            elif function(mid ) * function(start ) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid
def f( x : float ) -> float:
    '''simple docstring'''
    return x**3 - 2 * x - 5
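# f has a single real root near x ≈ 2.0945515; bisection on [1, 1000] converges to it
# within the 1e-7 tolerance used above.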
if __name__ == "__main__":
print(bisection(f, 1, 1_000))
import doctest
doctest.testmod()
| 315 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool( PipelineTool ):
    default_checkpoint = '''Salesforce/blip-image-captioning-base'''
    description = (
        '''This is a tool that generates a description of an image. It takes an input named `image` which should be the '''
        '''image to caption, and returns a text that contains the description in English.'''
    )
    name = '''image_captioner'''
    model_class = AutoModelForVision2Seq
    inputs = ['''image''']
    outputs = ['''text''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['vision'] )
        super().__init__(*args , **kwargs )
    def encode( self , image : "Image" ):
        """simple docstring"""
        return self.pre_processor(images=image , return_tensors='pt' )
    def forward( self , inputs ):
        """simple docstring"""
        return self.model.generate(**inputs )
    def decode( self , outputs ):
        """simple docstring"""
        return self.pre_processor.batch_decode(outputs , skip_special_tokens=True )[0].strip()
| 141 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ) , '''Tatoeba directory does not exist.''' )
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def resolver( self ):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir )
@slow
    def test_resolver( self ):
        self.resolver.convert_models(['heb-eng'] )
@slow
    def test_model_card( self ):
        mmd , mmeta = self.resolver.write_model_card('opus-mt-he-en' , dry_run=True )
        assert mmeta["long_pair"] == "heb-eng"
| 315 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig( PretrainedConfig ):
    model_type = '''vit_msn'''
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-06 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
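# With these defaults the encoder matches ViT-Base: 12 layers of width 768 with 12
# attention heads, operating on 16x16 patches of 224x224 RGB images.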
| 308 |
"""simple docstring"""
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 315 | 0 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester( ConfigTester ):
    """simple docstring"""
    def create_and_test_config_common_properties( self ):
        '''simple docstring'''
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , """tf_padding""" ) )
        self.parent.assertTrue(hasattr(config , """depth_multiplier""" ) )
class MobileNetVaModelTester :
    """simple docstring"""
    def __init__( self , parent , batch_size=13 , num_channels=3 , image_size=32 , depth_multiplier=0.25 , depth_divisible_by=8 , min_depth=8 , expand_ratio=6 , output_stride=32 , first_layer_is_expansion=True , finegrained_output=True , tf_padding=True , hidden_act="relu6" , last_hidden_size=1280 , classifier_dropout_prob=0.1 , initializer_range=0.02 , use_labels=True , is_training=True , num_labels=10 , scope=None , ) -> None:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config( self ):
'''simple docstring'''
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels , pixel_labels ):
        '''simple docstring'''
        model = MobileNetVaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels , pixel_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels , pixel_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MobileNetVaForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
        result = model(pixel_values , labels=pixel_labels )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels , pixel_labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetVaModel,
            "image-classification": MobileNetVaForImageClassification,
            "image-segmentation": MobileNetVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also works using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 10_01))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetVaForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
        model = model.to(torch_device)

        image_processor = MobileNetVaImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
| 252 |
"""simple docstring"""
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Find all the valid positions a knight can move to from the current position."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []

    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)

    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Check if the board (matrix) has been completely filled with non-zero values."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Helper function to solve the knight tour problem via backtracking."""
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position

        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find a solution for the knight tour problem on a board of size n."""
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
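
# Usage sketch (not part of the original module): the smallest board admitting an
# open knight's tour is 5x5. Each cell of the returned board holds the 1-based
# step at which the knight visited it.
#
#     board = open_knight_tour(5)
#     for row in board:
#         print(row)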
| 315 | 0 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)


MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    """Format a user-agent string with basic info about a request."""
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
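
# Illustrative output only (every version below depends on the local environment):
#
#     http_user_agent({"pipeline_class": "StableDiffusionPipeline"})
#     # -> "diffusers/<version>; python/3.10.6; session_id/<hex>; torch/2.0.1; pipeline_class/StableDiffusionPipeline"
#
# With telemetry disabled the string ends in "; telemetry/off" instead.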
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None) -> str:
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )

    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en",
            license="apache-2.0",
            library_name="diffusers",
            tags=[],
            datasets=args.dataset_name,
            metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None) -> Optional[str]:
    """Extract the commit hash from a resolved filename pointing into the cache."""
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
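
# Illustrative example (the local path below is made up): for a cached file such as
#   .../diffusers/models--runwayml--stable-diffusion-v1-5/snapshots/<40-char-hex>/unet/config.json
# the regex captures the snapshot folder name; it is returned only when
# REGEX_COMMIT_HASH accepts it as a commit hash, otherwise the function returns None.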
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")


def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of"
                    " diffusers again, files will be re-downloaded."
                )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
        logger.warning(
            "The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
            "existing cached models. This is a one-time operation, you can interrupt it or run it "
            "later by calling `diffusers.utils.hub_utils.move_cache()`."
        )
        try:
            move_cache()
        except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
            logger.error(
                f"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
                "file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole "
                "message and we will do our best to help."
            )

if cache_version < 1:
    try:
        os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
        with open(cache_version_file, "w") as f:
            f.write("1")
    except Exception:
        logger.warning(
            f"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
            "the directory exists and can be written to."
        )
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)
    return weights_name
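
# For example (derived directly from the logic above):
#
#     _add_variant("diffusion_pytorch_model.bin", "fp16")  # -> "diffusion_pytorch_model.fp16.bin"
#     _add_variant("diffusion_pytorch_model.bin")          # -> "diffusion_pytorch_model.bin"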
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`"
                    f" is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model"
                    f" variants via `revision='{revision}'` will be removed in diffusers v1. Please use"
                    f" `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via"
                    f" `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One"
                    f" should use `variant='{revision}'` instead. However, it appears that"
                    f" {pretrained_model_name_or_path} currently does not have a"
                    f" {_add_variant(weights_name, revision)} file in the 'main' branch of"
                    f" {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if"
                    f" you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title"
                    f" '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the"
                    " correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                subfolder=subfolder,
                revision=revision or commit_hash,
            )
            return model_file

        except RepositoryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier"
                " listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a"
                " token having permission to this repo with `use_auth_token` or log in with `huggingface-cli"
                " login`."
            )
        except RevisionNotFoundError:
            raise EnvironmentError(
                f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for"
                " this model name. Check the model page at"
                f" 'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
            )
        except EntryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}."
            )
        except HTTPError as err:
            raise EnvironmentError(
                f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}"
            )
        except ValueError:
            raise EnvironmentError(
                f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
                f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
                f" directory containing a file named {weights_name}."
                " \nCheckout your internet connection or see how to run the library in"
                " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'."
            )
        except EnvironmentError:
            raise EnvironmentError(
                f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from"
                " 'https://huggingface.co/models', make sure you don't have a local directory with the same name."
                f" Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory"
                f" containing a file named {weights_name}"
            )
| 10 |
"""simple docstring"""
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node writes the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node logs the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 315 | 0 |
def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """A pangram contains every letter of the alphabet at least once."""
    frequency = set()
    input_str = input_str.replace(" ", "")  # Replace all the whitespace in our sentence
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - 97] = True
        elif char.isupper():
            flag[ord(char) - 65] = True
    return all(flag)


def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    """Benchmark code comparing the different versions."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
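
# Quick sanity check (not part of the original module): all three variants agree.
#
#     sentence = "The quick brown fox jumps over the lazy dog"
#     assert is_pangram(sentence) and is_pangram_faster(sentence) and is_pangram_fastest(sentence)
#     assert not is_pangram_fastest("not a pangram")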
| 146 |
"""simple docstring"""
def different_signs(num1: int, num2: int) -> bool:
    """Return True if the two integers have opposite signs."""
    return num1 ^ num2 < 0
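
# Why XOR works here: for two's-complement integers the sign lives in the most
# significant bit, so num1 ^ num2 is negative exactly when the sign bits differ.
# A few illustrative checks (not part of the original module):
#
#     different_signs(1, -1)   # True
#     different_signs(-1, -1)  # False
#     different_signs(0, -1)   # True (zero counts as non-negative)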
if __name__ == "__main__":
import doctest
doctest.testmod()
| 315 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=64,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )
        model = CLIPVisionModel(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1_024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True

        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
| 108 |
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class Test(unittest.TestCase):
    def test_component(self):
        """test for method component()"""
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self):
        """test for method __str__()"""
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self):
        """test for method __len__()"""
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self):
        """test for method euclidean_length()"""
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self):
        """test for + operator"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self):
        """test for - operator"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self):
        """test for * operator"""
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self):
        """test for global function zero_vector()"""
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self):
        """test for global function unit_basis_vector()"""
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self):
        """test for global function axpy() (operation)"""
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self):
        """test for method copy()"""
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self):
        """test for method change_component()"""
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self):
        """test for Matrix method __str__()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self):
        """test for Matrix method minor()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self):
        """test for Matrix method cofactor()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self):
        """test for Matrix method determinant()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test__mul__matrix(self):
        """test for Matrix * operator"""
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self):
        """test for Matrix method change_component()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self):
        """test for Matrix method component()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)

    def test__add__matrix(self):
        """test for Matrix + operator"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test__sub__matrix(self):
        """test for Matrix - operator"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self):
        """test for global function square_zero_matrix()"""
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n",
            str(square_zero_matrix(5)),
        )
if __name__ == "__main__":
unittest.main()
| 315 | 0 |
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
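
# Why subtract the row-wise max before exponentiating? np.exp overflows for large
# logits (np.exp(1000) -> inf); shifting by the max leaves the result unchanged,
# since exp(x - m) / sum(exp(x - m)) == exp(x) / sum(exp(x)).
# A quick illustrative check (not part of the original file):
#
#     softmax(np.array([[1000.0, 1001.0]]))  # -> approx [[0.2689, 0.7311]], no overflow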
class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 130 |
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}


class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32_000,
        d_model=1_024,
        n_layer=24,
        n_head=16,
        d_inner=4_096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        """Constructs XLNetConfig."""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
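
# A minimal usage sketch (not part of the original file): `d_head` is derived,
# not passed, so `d_model` must be divisible by `n_head`.
#
#     config = XLNetConfig(vocab_size=32_000, d_model=1_024, n_layer=24, n_head=16)
#     assert config.d_head == 1_024 // 16  # 64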
| 315 | 0 |
import argparse

from t5x import checkpoints

from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM


def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    t5x_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)

    split_mlp_wi = "wi_0" in t5x_model["target"]["encoder"]["layers_0"]["mlp"]

    if config.model_type == "t5":
        encoder_attn_name = "SelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "local":
        encoder_attn_name = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        encoder_attn_name = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5'` with"
            " `encoder_attention_type` attribute with a value from ['local', 'transient-global']."
        )

    # Encoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            t5x_global_layer_norm = t5x_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]

        # Layer Normalization
        t5x_attention_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]

        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
        flax_model_encoder_layer_block["0"][encoder_attn_name]["k"]["kernel"] = t5x_attention_key
        flax_model_encoder_layer_block["0"][encoder_attn_name]["o"]["kernel"] = t5x_attention_out
        flax_model_encoder_layer_block["0"][encoder_attn_name]["q"]["kernel"] = t5x_attention_query
        flax_model_encoder_layer_block["0"][encoder_attn_name]["v"]["kernel"] = t5x_attention_value

        flax_model_encoder_layer_block["0"]["layer_norm"]["weight"] = t5x_attention_layer_norm

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            flax_model_encoder_layer_block["0"][encoder_attn_name]["global_input_layer_norm"]["weight"] = (
                t5x_global_layer_norm
            )

        if split_mlp_wi:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_encoder_layer_block["1"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_encoder_layer_block["1"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["encoder"]["block"][str(layer_index)]["layer"] = flax_model_encoder_layer_block

    # Only for layer 0:
    t5x_encoder_rel_embedding = t5x_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["relative_attention_bias"][
        "embedding"
    ] = t5x_encoder_rel_embedding

    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        t5x_encoder_global_rel_embedding = t5x_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["global_relative_attention_bias"][
            "embedding"
        ] = t5x_encoder_global_rel_embedding

    # Assigning
    t5x_encoder_norm = t5x_model["target"]["encoder"]["encoder_norm"]["scale"]
    flax_model.params["encoder"]["final_layer_norm"]["weight"] = t5x_encoder_norm

    # Decoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]

        # Layer Normalization
        t5x_pre_attention_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
            "scale"
        ]

        # Encoder-Decoder-Attention
        t5x_enc_dec_attention_module = t5x_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        t5x_enc_dec_attention_key = t5x_enc_dec_attention_module["key"]["kernel"]
        t5x_enc_dec_attention_out = t5x_enc_dec_attention_module["out"]["kernel"]
        t5x_enc_dec_attention_query = t5x_enc_dec_attention_module["query"]["kernel"]
        t5x_enc_dec_attention_value = t5x_enc_dec_attention_module["value"]["kernel"]

        # Layer Normalization
        t5x_cross_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]

        # MLP
        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
        flax_model_decoder_layer_block["0"]["SelfAttention"]["k"]["kernel"] = t5x_attention_key
        flax_model_decoder_layer_block["0"]["SelfAttention"]["o"]["kernel"] = t5x_attention_out
        flax_model_decoder_layer_block["0"]["SelfAttention"]["q"]["kernel"] = t5x_attention_query
        flax_model_decoder_layer_block["0"]["SelfAttention"]["v"]["kernel"] = t5x_attention_value

        flax_model_decoder_layer_block["0"]["layer_norm"]["weight"] = t5x_pre_attention_layer_norm

        flax_model_decoder_layer_block["1"]["EncDecAttention"]["k"]["kernel"] = t5x_enc_dec_attention_key
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["o"]["kernel"] = t5x_enc_dec_attention_out
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["q"]["kernel"] = t5x_enc_dec_attention_query
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["v"]["kernel"] = t5x_enc_dec_attention_value

        flax_model_decoder_layer_block["1"]["layer_norm"]["weight"] = t5x_cross_layer_norm

        if split_mlp_wi:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_decoder_layer_block["2"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_decoder_layer_block["2"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["decoder"]["block"][str(layer_index)]["layer"] = flax_model_decoder_layer_block

    # Decoder Normalization
    t5x_decoder_norm = t5x_model["target"]["decoder"]["decoder_norm"]["scale"]
    flax_model.params["decoder"]["final_layer_norm"]["weight"] = t5x_decoder_norm

    # Only for layer 0:
    t5x_decoder_rel_embedding = t5x_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["decoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"][
        "embedding"
    ] = t5x_decoder_rel_embedding

    # Token Embeddings
    t5x_token_embeddings = t5x_model["target"]["token_embedder"]["embedding"]
    flax_model.params["shared"]["embedding"] = t5x_token_embeddings

    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in t5x_model["target"]["decoder"]:
        flax_model.params["lm_head"]["kernel"] = t5x_model["target"]["decoder"]["logits_dense"]["kernel"]

    flax_model.save_pretrained(flax_dump_folder_path)
    print("T5X Model was successfully converted!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
    )
    parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
    parser.add_argument(
        "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
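
# Example invocation (paths are illustrative):
#
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --config_name google/t5-v1_1-small \
#       --flax_dump_folder_path ./t5-v1_1-small-flax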
| 52 |
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks(args):
    """A bunch of args sanity checks to perform even starting..."""
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts)
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])

    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config)
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights)

    if args.freeze_token_type_embds:
        assert args.student_type in ["roberta"]

    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def _snake_case ( ) -> Tuple:
'''simple docstring'''
_A = argparse.ArgumentParser(description='Training' )
parser.add_argument('--force' , action='store_true' , help='Overwrite dump_path if it already exists.' )
parser.add_argument(
'--dump_path' , type=_snake_case , required=_snake_case , help='The output directory (log, checkpoints, parameters, etc.)' )
parser.add_argument(
'--data_file' , type=_snake_case , required=_snake_case , help='The binarized file (tokenized + tokens_to_ids) and grouped by sequence.' , )
parser.add_argument(
'--student_type' , type=_snake_case , choices=['distilbert', 'roberta', 'gpt2'] , required=_snake_case , help='The student type (DistilBERT, RoBERTa).' , )
parser.add_argument('--student_config' , type=_snake_case , required=_snake_case , help='Path to the student configuration.' )
parser.add_argument(
'--student_pretrained_weights' , default=_snake_case , type=_snake_case , help='Load student initialization checkpoint.' )
parser.add_argument(
'--teacher_type' , choices=['bert', 'roberta', 'gpt2'] , required=_snake_case , help='Teacher type (BERT, RoBERTa).' )
parser.add_argument('--teacher_name' , type=_snake_case , required=_snake_case , help='The teacher model.' )
parser.add_argument('--temperature' , default=2.0 , type=_snake_case , help='Temperature for the softmax temperature.' )
parser.add_argument(
'--alpha_ce' , default=0.5 , type=_snake_case , help='Linear weight for the distillation loss. Must be >=0.' )
parser.add_argument(
'--alpha_mlm' , default=0.0 , type=_snake_case , help='Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.' , )
parser.add_argument('--alpha_clm' , default=0.5 , type=_snake_case , help='Linear weight for the CLM loss. Must be >=0.' )
parser.add_argument('--alpha_mse' , default=0.0 , type=_snake_case , help='Linear weight of the MSE loss. Must be >=0.' )
parser.add_argument(
'--alpha_cos' , default=0.0 , type=_snake_case , help='Linear weight of the cosine embedding loss. Must be >=0.' )
parser.add_argument(
'--mlm' , action='store_true' , help='The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.' )
parser.add_argument(
'--mlm_mask_prop' , default=0.15 , type=_snake_case , help='Proportion of tokens for which we need to make a prediction.' , )
parser.add_argument('--word_mask' , default=0.8 , type=_snake_case , help='Proportion of tokens to mask out.' )
parser.add_argument('--word_keep' , default=0.1 , type=_snake_case , help='Proportion of tokens to keep.' )
parser.add_argument('--word_rand' , default=0.1 , type=_snake_case , help='Proportion of tokens to randomly replace.' )
parser.add_argument(
'--mlm_smoothing' , default=0.7 , type=_snake_case , help='Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).' , )
parser.add_argument('--token_counts' , type=_snake_case , help='The token counts in the data_file for MLM.' )
parser.add_argument(
'--restrict_ce_to_mask' , action='store_true' , help='If true, compute the distillation loss only the [MLM] prediction distribution.' , )
parser.add_argument(
'--freeze_pos_embs' , action='store_true' , help='Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.' , )
parser.add_argument(
'--freeze_token_type_embds' , action='store_true' , help='Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.' , )
parser.add_argument('--n_epoch' , type=_snake_case , default=3 , help='Number of pass on the whole dataset.' )
parser.add_argument('--batch_size' , type=_snake_case , default=5 , help='Batch size (for each process).' )
parser.add_argument(
'--group_by_size' , action='store_false' , help='If true, group sequences that have similar length into the same batch. Default is true.' , )
parser.add_argument(
'--gradient_accumulation_steps' , type=_snake_case , default=50 , help='Gradient accumulation for larger training batches.' , )
parser.add_argument('--warmup_prop' , default=0.05 , type=_snake_case , help='Linear warmup proportion.' )
parser.add_argument('--weight_decay' , default=0.0 , type=_snake_case , help='Weight decay if we apply some.' )
parser.add_argument('--learning_rate' , default=5E-4 , type=_snake_case , help='The initial learning rate for Adam.' )
parser.add_argument('--adam_epsilon' , default=1E-6 , type=_snake_case , help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm' , default=5.0 , type=_snake_case , help='Max gradient norm.' )
parser.add_argument('--initializer_range' , default=0.02 , type=_snake_case , help='Random initialization range.' )
parser.add_argument(
'--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , )
parser.add_argument(
'--fp16_opt_level' , type=_snake_case , default='O1' , help=(
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
'See details at https://nvidia.github.io/apex/amp.html'
) , )
parser.add_argument('--n_gpu' , type=_snake_case , default=1 , help='Number of GPUs in the node.' )
parser.add_argument('--local_rank' , type=_snake_case , default=-1 , help='Distributed training - Local rank' )
parser.add_argument('--seed' , type=_snake_case , default=56 , help='Random seed' )
parser.add_argument('--log_interval' , type=_snake_case , default=5_00 , help='Tensorboard logging interval.' )
parser.add_argument('--checkpoint_interval' , type=_snake_case , default=40_00 , help='Checkpoint interval.' )
    args = parser.parse_args()
    sanity_checks(args )
    # ARGS #
    init_gpu_params(args )
    set_seed(args )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
                raise ValueError(
                    F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'''
                    ' it. Use `--force` if you want to overwrite it.' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(F'''Param: {args}''' )
with open(os.path.join(args.dump_path , 'parameters.json' ) , 'w' ) as f:
            json.dump(vars(args ) , f , indent=4 )
git_log(args.dump_path )
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name )
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol )
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(F'''Special tokens {special_tok_ids}''' )
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''' )
    with open(args.data_file , 'rb' ) as fp:
        data = pickle.load(fp )
    if args.mlm:
        logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' )
        with open(args.token_counts , 'rb' ) as fp:
            counts = pickle.load(fp )
        token_probs = np.maximum(counts , 1 ) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs )
    else:
        token_probs = None
    train_lm_seq_dataset = LmSeqsDataset(params=args , data=data )
logger.info('Data loader created.' )
# STUDENT #
    logger.info(F'''Loading student config from {args.student_config}''' )
    stu_architecture_config = student_config_class.from_pretrained(args.student_config )
    stu_architecture_config.output_hidden_states = True
    if args.student_pretrained_weights is not None:
        logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' )
        student = student_model_class.from_pretrained(args.student_pretrained_weights , config=stu_architecture_config )
    else:
        student = student_model_class(stu_architecture_config )
    if args.n_gpu > 0:
        student.to(F'''cuda:{args.local_rank}''' )
    logger.info('Student loaded.' )
    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=True )
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''' )
logger.info(F'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student , args )
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student , args )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
    distiller = Distiller(
        params=args , dataset=train_lm_seq_dataset , token_probs=token_probs , student=student , teacher=teacher )
    distiller.train()
logger.info('Let\'s go get some drinks.' )
if __name__ == "__main__":
main()
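
# A minimal, self-contained sketch (not part of the script above) of the token-count
# smoothing done in the `args.mlm` branch: counts raised to -mlm_smoothing make rare
# tokens more likely to be masked, and special tokens are zeroed so they are never
# predicted. `smooth_token_probs` is a hypothetical helper name.
import numpy as np
import torch

def smooth_token_probs(counts, special_ids, smoothing=0.7):
    probs = np.maximum(np.asarray(counts, dtype=np.float64), 1) ** -smoothing
    for idx in special_ids:
        probs[idx] = 0.0  # never predict special tokens
    return torch.from_numpy(probs)

# Rare ids (small counts) get the largest masking weight:
print(smooth_token_probs([10_000, 100, 5, 1], special_ids=[0]))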
| 315 | 0 |
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case ( __lowerCAmelCase ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase="None" , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ) ->List[str]:
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_mask
a_ = use_token_type_ids
a_ = use_labels
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = type_sequence_label_size
a_ = initializer_range
a_ = num_labels
a_ = num_choices
a_ = relative_attention
a_ = position_biased_input
a_ = pos_att_type
a_ = scope
def UpperCAmelCase__ ( self) ->Optional[int]:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            choice_labels = ids_tensor([self.batch_size] , self.num_choices)
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self) ->Optional[int]:
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def UpperCAmelCase__ ( self) ->Optional[int]:
        config = self.get_config()
        config.vocab_size = 3_00  # pipeline tests use a different vocab size
        return config
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->List[str]:
self.parent.assertListEqual(list(result.loss.size()) , [])
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Optional[Any]:
a_ = DebertaModel(config=_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
a_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase)[0]
a_ = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase)[0]
a_ = model(_UpperCAmelCase)[0]
self.parent.assertListEqual(list(sequence_output.size()) , [self.batch_size, self.seq_length, self.hidden_size])
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Any:
a_ = DebertaForMaskedLM(config=_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
a_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->int:
a_ = self.num_labels
a_ = DebertaForSequenceClassification(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
a_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase)
self.parent.assertListEqual(list(result.logits.size()) , [self.batch_size, self.num_labels])
self.check_loss_output(_UpperCAmelCase)
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Union[str, Any]:
a_ = self.num_labels
a_ = DebertaForTokenClassification(config=_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
a_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Dict:
a_ = DebertaForQuestionAnswering(config=_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
a_ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase__ ( self) ->Union[str, Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
a_ : Any = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
a_ : Union[str, Any] = (
{
'''feature-extraction''': DebertaModel,
'''fill-mask''': DebertaForMaskedLM,
'''question-answering''': DebertaForQuestionAnswering,
'''text-classification''': DebertaForSequenceClassification,
'''token-classification''': DebertaForTokenClassification,
'''zero-shot''': DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ : Optional[int] = True
a_ : Union[str, Any] = False
a_ : Union[str, Any] = False
a_ : str = False
a_ : Optional[Any] = False
def UpperCAmelCase__ ( self) ->List[str]:
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self , config_class=DebertaConfig , hidden_size=37)
def UpperCAmelCase__ ( self) ->List[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self) ->int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)
def UpperCAmelCase__ ( self) ->List[str]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)
def UpperCAmelCase__ ( self) ->Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)
def UpperCAmelCase__ ( self) ->Optional[int]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)
def UpperCAmelCase__ ( self) ->Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)
@slow
def UpperCAmelCase__ ( self) ->Any:
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case ( unittest.TestCase ):
@unittest.skip(reason="Model not available yet")
def UpperCAmelCase__ ( self) ->List[Any]:
pass
@slow
def UpperCAmelCase__ ( self) ->List[Any]:
        model = DebertaModel.from_pretrained("microsoft/deberta-base")
        input_ids = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5_986, -0.8_055, -0.8_462], [1.4_484, -0.9_348, -0.8_059], [0.3_123, 0.0_032, -1.4_131]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4) , F'''{output[:, 1:4, 1:4]}''')
| 243 |
"""simple docstring"""
from manim import *
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : Dict ):
_A = Rectangle(height=0.5 , width=0.5 )
_A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_A = Rectangle(height=0.25 , width=0.25 )
_A = [mem.copy() for i in range(6 )]
_A = [mem.copy() for i in range(6 )]
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = Text('CPU' , font_size=24 )
_A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_UpperCAmelCase )
_A = [mem.copy() for i in range(4 )]
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = Text('GPU' , font_size=24 )
_A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(_UpperCAmelCase )
_A = [mem.copy() for i in range(6 )]
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = Text('Model' , font_size=24 )
_A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(_UpperCAmelCase )
_A = []
_A = []
for i, rect in enumerate(_UpperCAmelCase ):
_A = fill.copy().set_fill(_UpperCAmelCase , opacity=0.8 )
target.move_to(_UpperCAmelCase )
model_arr.append(_UpperCAmelCase )
_A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_UpperCAmelCase , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(_UpperCAmelCase )
self.add(*_UpperCAmelCase , *_UpperCAmelCase )
_A = [meta_mem.copy() for i in range(6 )]
_A = [meta_mem.copy() for i in range(6 )]
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = Text('Disk' , font_size=24 )
_A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
disk.move_to([-4, -1.25, 0] )
self.add(_UpperCAmelCase , _UpperCAmelCase )
_A = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_A = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_UpperCAmelCase , _UpperCAmelCase )
_A = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(_UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_UpperCAmelCase )
_A = MarkupText(
F'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase ) )
_A = Square(0.3 )
input.set_fill(_UpperCAmelCase , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , _UpperCAmelCase , buff=0.5 )
self.play(Write(_UpperCAmelCase ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=_UpperCAmelCase , buff=0.02 )
self.play(MoveToTarget(_UpperCAmelCase ) )
self.play(FadeOut(_UpperCAmelCase ) )
_A = Arrow(start=_UpperCAmelCase , end=_UpperCAmelCase , color=_UpperCAmelCase , buff=0.5 )
a.next_to(model_arr[0].get_left() , _UpperCAmelCase , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
_A = MarkupText(
F'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase , run_time=3 ) )
_A = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(_UpperCAmelCase ) , Circumscribe(model_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
_A = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , _UpperCAmelCase , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
_A = AnimationGroup(
FadeOut(_UpperCAmelCase , run_time=0.5 ) , MoveToTarget(_UpperCAmelCase , run_time=0.5 ) , FadeIn(_UpperCAmelCase , run_time=0.5 ) , lag_ratio=0.2 )
self.play(_UpperCAmelCase )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
_A = 0.7
self.play(
Circumscribe(model_arr[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_arr[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
_A = a_c
_A = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(_UpperCAmelCase ) , FadeOut(_UpperCAmelCase , run_time=0.5 ) , )
_A = MarkupText(F'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase , run_time=3 ) , MoveToTarget(_UpperCAmelCase ) )
self.wait()
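
# Rendering note (an assumption, not from the source): a Manim scene like the one above
# is rendered from the command line, e.g. with the file saved as big_model_inference.py:
#
#   manim -pql big_model_inference.py <SceneClassName>
#
# where -p previews the result and -ql renders at low quality for fast iteration.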
| 315 | 0 |
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTO = tf.data.AUTOTUNE
def parse_args():
    parser = argparse.ArgumentParser(description="""Train a masked language model on TPU.""" )
parser.add_argument(
"""--pretrained_model_config""",type=_snake_case,default="""roberta-base""",help="""The model config to use. Note that we don\'t copy the model\'s weights, only the config!""",)
parser.add_argument(
"""--tokenizer""",type=_snake_case,default="""unigram-tokenizer-wikitext""",help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.""",)
parser.add_argument(
"""--per_replica_batch_size""",type=_snake_case,default=8,help="""Batch size per TPU core.""",)
parser.add_argument(
"""--no_tpu""",action="""store_true""",help="""If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.""",)
parser.add_argument(
"""--tpu_name""",type=_snake_case,help="""Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.""",default="""local""",)
parser.add_argument(
"""--tpu_zone""",type=_snake_case,help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""",)
parser.add_argument(
"""--gcp_project""",type=_snake_case,help="""Google cloud project name. Only used for non-Colab TPU nodes.""" )
parser.add_argument(
"""--bfloat16""",action="""store_true""",help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""",)
parser.add_argument(
"""--train_dataset""",type=_snake_case,help="""Path to training dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""",)
parser.add_argument(
"""--shuffle_buffer_size""",type=_snake_case,default=2**18,help="""Size of the shuffle buffer (in samples)""",)
parser.add_argument(
"""--eval_dataset""",type=_snake_case,help="""Path to evaluation dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""",)
parser.add_argument(
"""--num_epochs""",type=_snake_case,default=1,help="""Number of epochs to train for.""",)
parser.add_argument(
"""--learning_rate""",type=_snake_case,default=1e-4,help="""Learning rate to use for training.""",)
parser.add_argument(
"""--weight_decay_rate""",type=_snake_case,default=1e-3,help="""Weight decay rate to use for training.""",)
parser.add_argument(
"""--max_length""",type=_snake_case,default=512,help="""Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py""",)
parser.add_argument(
"""--mlm_probability""",type=_snake_case,default=0.15,help="""Fraction of tokens to mask during training.""",)
parser.add_argument("""--output_dir""",type=_snake_case,required=_snake_case,help="""Path to save model checkpoints to.""" )
parser.add_argument("""--hub_model_id""",type=_snake_case,help="""Model ID to upload to on the Hugging Face Hub.""" )
    args = parser.parse_args()
return args
def initialize_tpu( args ):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name,zone=args.tpu_zone,project=args.gcp_project )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            """Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """
            """--gcp_project. When running on a TPU VM, use --tpu_name local.""" )
    tf.config.experimental_connect_to_cluster(tpu )
    tf.tpu.experimental.initialize_tpu_system(tpu )
    return tpu
def count_samples( file_list ):
    num_samples = 0
    for file in file_list:
        filename = file.split("""/""" )[-1]
        sample_count = int(re.search(r"""-\d+-(\d+)\.tfrecord""",filename ).group(1 ) )
        num_samples += sample_count
    return num_samples
def prepare_dataset( records,decode_fn,mask_fn,batch_size,shuffle,shuffle_buffer_size=None ):
    num_samples = count_samples(records )
    dataset = tf.data.Dataset.from_tensor_slices(records )
    if shuffle:
        dataset = dataset.shuffle(len(records ) )
    dataset = tf.data.TFRecordDataset(dataset,num_parallel_reads=AUTO )
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples ) )
    dataset = dataset.map(decode_fn,num_parallel_calls=AUTO )
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size )
    dataset = dataset.batch(batch_size,drop_remainder=True )
    dataset = dataset.map(mask_fn,num_parallel_calls=AUTO )
    dataset = dataset.prefetch(AUTO )
    return dataset
def main( args ):
    if not args.no_tpu:
        tpu = initialize_tpu(args )
        strategy = tf.distribute.TPUStrategy(tpu )
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="""/gpu:0""" )
    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" )
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer )
    config = AutoConfig.from_pretrained(args.pretrained_model_config )
    config.vocab_size = tokenizer.vocab_size
    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset,"""*.tfrecord""" ) )
    if not training_records:
        raise ValueError(f'''No .tfrecord files found in {args.train_dataset}.''' )
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset,"""*.tfrecord""" ) )
    if not eval_records:
        raise ValueError(f'''No .tfrecord files found in {args.eval_dataset}.''' )
    num_train_samples = count_samples(training_records )
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs
with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config )
        model(model.dummy_inputs )  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer , schedule = create_optimizer(
            num_train_steps=total_train_steps,num_warmup_steps=total_train_steps // 20,init_lr=args.learning_rate,weight_decay_rate=args.weight_decay_rate,)
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer,metrics=["""accuracy"""] )
    def decode_fn(example ):
        features = {
            """input_ids""": tf.io.FixedLenFeature(dtype=tf.int64,shape=(args.max_length,) ),
            """attention_mask""": tf.io.FixedLenFeature(dtype=tf.int64,shape=(args.max_length,) ),
        }
        return tf.io.parse_single_example(example,features )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer,mlm_probability=args.mlm_probability,mlm=True,return_tensors="""tf""" )
    def mask_with_collator(batch ):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["""attention_mask"""],tf.bool )
            | (batch["""input_ids"""] == tokenizer.cls_token_id)
            | (batch["""input_ids"""] == tokenizer.sep_token_id)
        )
        batch["""input_ids"""] , batch["""labels"""] = data_collator.tf_mask_tokens(
            batch["""input_ids"""],vocab_size=len(tokenizer ),mask_token_id=tokenizer.mask_token_id,special_tokens_mask=special_tokens_mask,)
        return batch
    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync
    train_dataset = prepare_dataset(
        training_records,decode_fn=decode_fn,mask_fn=mask_with_collator,batch_size=batch_size,shuffle=True,shuffle_buffer_size=args.shuffle_buffer_size,)
    eval_dataset = prepare_dataset(
        eval_records,decode_fn=decode_fn,mask_fn=mask_with_collator,batch_size=batch_size,shuffle=False,)
    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir,hub_model_id=args.hub_model_id,tokenizer=tokenizer ) )
    model.fit(
        train_dataset,validation_data=eval_dataset,epochs=args.num_epochs,callbacks=callbacks,)
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
    args = parse_args()
main(args)
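
# A hedged sketch of the special-token exclusion inside mask_with_collator above:
# padding, CLS-like and SEP-like positions are flagged so the collator never masks
# them. The token ids 101/102 are BERT-style assumptions for illustration only.
import tensorflow as tf

attention_mask = tf.constant([[1, 1, 1, 1, 0, 0]])
input_ids = tf.constant([[101, 7592, 2088, 102, 0, 0]])
special_tokens_mask = (
    ~tf.cast(attention_mask, tf.bool)
    | (input_ids == 101)  # assumed cls_token_id
    | (input_ids == 102)  # assumed sep_token_id
)
print(special_tokens_mask.numpy())  # [[ True False False  True  True  True]]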
| 26 |
"""simple docstring"""
def bubble_sort(list_data: list, length: int = 0) -> list:
    '''Recursive bubble sort: each pass bubbles the largest remaining element to the
    end, then recurses on the unsorted prefix until a pass makes no swaps.

    >>> bubble_sort([5, 1, 4, 2, 8])
    [1, 2, 4, 5, 8]
    >>> bubble_sort([])
    []
    '''
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 315 | 0 |
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class snake_case__ (__lowerCAmelCase ):
"""simple docstring"""
def __init__( self , __lowercase , __lowercase , __lowercase ) -> Any:
"""simple docstring"""
super().__init__()
self.register_modules(vqvae=_UpperCAmelCase , unet=_UpperCAmelCase , scheduler=_UpperCAmelCase )
@torch.no_grad()
def __call__( self , __lowercase = 1 , __lowercase = None , __lowercase = 0.0 , __lowercase = 5_0 , __lowercase = "pil" , __lowercase = True , **__lowercase , ) -> Optional[Any]:
"""simple docstring"""
a__ : str = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=_UpperCAmelCase , )
a__ : Union[str, Any] = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
a__ : str = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(_UpperCAmelCase )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
a__ : str = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
a__ : Tuple = {}
if accepts_eta:
a__ : Any = eta
for t in self.progress_bar(self.scheduler.timesteps ):
a__ : Dict = self.scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
# predict the noise residual
a__ : Optional[int] = self.unet(_UpperCAmelCase , _UpperCAmelCase ).sample
# compute the previous noisy sample x_t -> x_t-1
a__ : List[Any] = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
# decode the image latents with the VAE
a__ : Optional[int] = self.vqvae.decode(_UpperCAmelCase ).sample
a__ : Dict = (image / 2 + 0.5).clamp(0 , 1 )
a__ : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
a__ : Optional[int] = self.numpy_to_pil(_UpperCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_UpperCAmelCase )
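
# A minimal usage sketch, assuming the class above is diffusers' unconditional
# latent-diffusion pipeline (LDMPipeline); the checkpoint id is an illustrative
# assumption, not taken from the source.
import torch
from diffusers import LDMPipeline

pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")  # assumed checkpoint
image = pipe(batch_size=1, num_inference_steps=50, generator=torch.manual_seed(0)).images[0]
image.save("ldm_sample.png")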
| 170 |
"""simple docstring"""
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
a = logging.get_logger(__name__)
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : Any = ['''input_values''', '''attention_mask''']
def __init__( self : Dict , _UpperCAmelCase : int = 1 , _UpperCAmelCase : int = 16_000 , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : bool = False , _UpperCAmelCase : int = 80 , _UpperCAmelCase : int = 16 , _UpperCAmelCase : int = 64 , _UpperCAmelCase : str = "hann_window" , _UpperCAmelCase : float = 1.0 , _UpperCAmelCase : float = 80 , _UpperCAmelCase : float = 7_600 , _UpperCAmelCase : float = 1E-1_0 , _UpperCAmelCase : int = 2 , _UpperCAmelCase : bool = True , **_UpperCAmelCase : List[Any] , ):
super().__init__(feature_size=_UpperCAmelCase , sampling_rate=_UpperCAmelCase , padding_value=_UpperCAmelCase , **_UpperCAmelCase )
_A = do_normalize
_A = return_attention_mask
_A = num_mel_bins
_A = hop_length
_A = win_length
_A = win_function
_A = frame_signal_scale
_A = fmin
_A = fmax
_A = mel_floor
_A = reduction_factor
_A = win_length * sampling_rate // 1_000
_A = hop_length * sampling_rate // 1_000
_A = optimal_fft_length(self.sample_size )
_A = (self.n_fft // 2) + 1
_A = window_function(window_length=self.sample_size , name=self.win_function , periodic=_UpperCAmelCase )
_A = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='slaney' , mel_scale='slaney' , )
if frame_signal_scale != 1.0:
warnings.warn(
'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers' , _UpperCAmelCase , )
if reduction_factor != 2.0:
warnings.warn(
'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers' , _UpperCAmelCase , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def lowerCAmelCase_ ( _UpperCAmelCase : List[np.ndarray] , _UpperCAmelCase : List[np.ndarray] , _UpperCAmelCase : float = 0.0 ):
if attention_mask is not None:
_A = np.array(_UpperCAmelCase , np.intaa )
_A = []
for vector, length in zip(_UpperCAmelCase , attention_mask.sum(-1 ) ):
_A = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
_A = padding_value
normed_input_values.append(_UpperCAmelCase )
else:
_A = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : np.ndarray , ):
_A = spectrogram(
_UpperCAmelCase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='log10' , )
return log_mel_spec.T
def __call__( self : int , _UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : Optional[int] = None , **_UpperCAmelCase : Optional[int] , ):
if audio is None and audio_target is None:
raise ValueError('You must provide either `audio` or `audio_target` values.' )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
if audio is not None:
_A = self._process_audio(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , )
else:
_A = None
if audio_target is not None:
_A = self._process_audio(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , )
if inputs is None:
return inputs_target
else:
_A = inputs_target['input_values']
_A = inputs_target.get('attention_mask' )
if decoder_attention_mask is not None:
_A = decoder_attention_mask
return inputs
def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _UpperCAmelCase : bool = False , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , **_UpperCAmelCase : List[Any] , ):
_A = isinstance(_UpperCAmelCase , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
_A = is_batched_numpy or (
isinstance(_UpperCAmelCase , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_A = [np.asarray(_UpperCAmelCase , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(_UpperCAmelCase , np.ndarray ):
_A = np.asarray(_UpperCAmelCase , dtype=np.floataa )
elif isinstance(_UpperCAmelCase , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
_A = speech.astype(np.floataa )
# always return batch
if not is_batched:
_A = [speech]
# needed to make pad() work on spectrogram inputs
_A = self.feature_size
# convert into correct format for padding
if is_target:
_A = [self._extract_mel_features(_UpperCAmelCase ) for waveform in speech]
_A = BatchFeature({'input_values': features} )
_A = self.num_mel_bins
else:
_A = BatchFeature({'input_values': speech} )
_A = self.pad(
_UpperCAmelCase , padding=_UpperCAmelCase , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , **_UpperCAmelCase , )
_A = feature_size_hack
# convert input values to correct format
_A = padded_inputs['input_values']
if not isinstance(input_values[0] , np.ndarray ):
_A = [np.asarray(_UpperCAmelCase , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(_UpperCAmelCase , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
_A = [array.astype(np.floataa ) for array in input_values]
elif isinstance(_UpperCAmelCase , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
_A = input_values.astype(np.floataa )
# convert attention_mask to correct format
_A = padded_inputs.get('attention_mask' )
if attention_mask is not None:
_A = [np.asarray(_UpperCAmelCase , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
_A = (
attention_mask
if self._get_padding_strategies(_UpperCAmelCase , max_length=_UpperCAmelCase ) is not PaddingStrategy.DO_NOT_PAD
else None
)
_A = self.zero_mean_unit_var_norm(
padded_inputs['input_values'] , attention_mask=_UpperCAmelCase , padding_value=self.padding_value )
if return_tensors is not None:
_A = padded_inputs.convert_to_tensors(_UpperCAmelCase )
return padded_inputs
def lowerCAmelCase_ ( self : Any ):
_A = super().to_dict()
# Don't serialize these as they are derived from the other properties.
_A = ['window', 'mel_filters', 'sample_size', 'sample_stride', 'n_fft', 'n_freqs']
for name in names:
if name in output:
del output[name]
return output
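
# A hedged usage sketch: the extractor above mirrors SpeechT5's feature extractor
# (padded waveform `input_values` for inputs, log-mel `input_values` for targets);
# using the matching transformers class here is an assumption for illustration.
import numpy as np
from transformers import SpeechT5FeatureExtractor

extractor = SpeechT5FeatureExtractor()
speech = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
inputs = extractor(audio=speech, sampling_rate=16_000, return_tensors="np")
print(inputs["input_values"].shape)   # (1, 16000): padded raw waveform
targets = extractor(audio_target=speech, sampling_rate=16_000, return_tensors="np")
print(targets["input_values"].shape)  # (1, n_frames, 80): log-mel target features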
| 315 | 0 |
'''simple docstring'''
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    '''Build a randomly initialized seq2seq model from a named config and save it with its tokenizer.'''
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
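
# Example invocation (the model name and output path are placeholders):
#
#   python save_randomly_initialized_version.py t5-small /tmp/t5-small-random
#
# which is equivalent to calling
# save_randomly_initialized_version("t5-small", "/tmp/t5-small-random") in Python.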
| 141 |
"""simple docstring"""
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    '''Return all k-element combinations of 1..n, built by backtracking.'''
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(increment: int, total_number: int, level: int, current_list: list[int], total_list: list[list[int]]) -> None:
    '''Depth-first helper: append a finished combination when level reaches 0.'''
    if level == 0:
        total_list.append(current_list[:])
        return
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for combination in total_list:
        print(*combination)
if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
print_all_state(total_list)
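
# For reference, a cross-check against the standard library: the backtracking above
# enumerates exactly the k-subsets of 1..n that itertools.combinations yields, in the
# same lexicographic order (the helper name below is ours, for illustration).
from itertools import combinations

def generate_all_combinations_stdlib(n: int, k: int) -> list[list[int]]:
    return [list(c) for c in combinations(range(1, n + 1), k)]

assert generate_all_combinations_stdlib(4, 2) == [
    [1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]
]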
| 315 | 0 |
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class _A ( unittest.TestCase ):
def __init__( self : Optional[Any] , _A : List[str] , _A : Tuple=13 , _A : Tuple=7 , _A : Tuple=True , _A : Dict=True , _A : str=True , _A : List[str]=True , _A : Union[str, Any]=99 , _A : Any=32 , _A : Union[str, Any]=5 , _A : int=4 , _A : str=37 , _A : Any="gelu" , _A : int=0.1 , _A : Union[str, Any]=0.1 , _A : Tuple=512 , _A : Dict=16 , _A : Any=2 , _A : Optional[int]=0.02 , _A : List[str]=4 , ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Optional[Any] = parent
lowercase : int = batch_size
lowercase : Union[str, Any] = seq_length
lowercase : Any = is_training
lowercase : Dict = use_attention_mask
lowercase : List[Any] = use_token_type_ids
lowercase : int = use_labels
lowercase : Optional[int] = vocab_size
lowercase : Dict = hidden_size
lowercase : Tuple = num_hidden_layers
lowercase : int = num_attention_heads
lowercase : Tuple = intermediate_size
lowercase : Optional[int] = hidden_act
lowercase : int = hidden_dropout_prob
lowercase : Any = attention_probs_dropout_prob
lowercase : List[Any] = max_position_embeddings
lowercase : List[Any] = type_vocab_size
lowercase : int = type_sequence_label_size
lowercase : Tuple = initializer_range
lowercase : Tuple = num_choices
def __a ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        config = DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=True , )
return config, input_ids, attention_mask
def __a ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , attention_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class _A ( __lowerCAmelCase , unittest.TestCase ):
_UpperCamelCase : Dict = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __a ( self : Optional[int] ) -> int:
"""simple docstring"""
        self.model_tester = FlaxDistilBertModelTester(self )
@slow
def __a ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''distilbert-base-uncased''' )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class _A ( unittest.TestCase ):
@slow
def __a ( self : List[Any] ) -> int:
"""simple docstring"""
        model = FlaxDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = np.array([[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
| 308 |
"""simple docstring"""
def solution(n: int = 10_00) -> int:
    '''Sum of the maximal remainders r_max(a) = 2*a*((a-1)//2) for 3 <= a <= n
    (Project Euler problem 120).'''
    return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
if __name__ == "__main__":
print(solution())
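
# A brute-force cross-check of the closed form above, assuming the Project Euler 120
# reading: r_max(a) is the maximum of ((a-1)**n + (a+1)**n) mod a**2 over n >= 1.
def r_max_brute(a: int, scan: int = 2_000) -> int:
    m = a * a
    return max((pow(a - 1, n, m) + pow(a + 1, n, m)) % m for n in range(1, scan))

assert all(r_max_brute(a) == 2 * a * ((a - 1) // 2) for a in range(3, 30))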
| 315 | 0 |
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
UpperCAmelCase : List[str] = getLogger(__name__)
def __lowerCamelCase ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : str , lowerCamelCase__ : str , lowerCamelCase__ : int = 8 , lowerCamelCase__ : int = 1024 , lowerCamelCase__ : int="val" , lowerCamelCase__ : int=None , lowerCamelCase__ : Tuple=False , lowerCamelCase__ : Tuple="summarization" , lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : Optional[Any]=1 , lowerCamelCase__ : Dict = None , lowerCamelCase__ : str="" , **lowerCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
lowerCamelCase = str(_snake_case )
assert local_rank is not None
torch.distributed.init_process_group(backend="""nccl""" , rank=_snake_case )
lowerCamelCase = Path(_snake_case )
lowerCamelCase = save_dir.joinpath(f'rank_{local_rank}_output.json' )
torch.cuda.set_device(_snake_case )
lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained(_snake_case ).cuda()
if fpaa:
lowerCamelCase = model.half()
# determine if we need to increase num_beams
use_task_specific_params(_snake_case , _snake_case ) # update config with task specific params
lowerCamelCase = generate_kwargs.pop("""num_beams""" , model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
lowerCamelCase = num_return_sequences
lowerCamelCase = AutoTokenizer.from_pretrained(_snake_case )
logger.info(f'Inferred tokenizer type: {tokenizer.__class__}' ) # if this is wrong, check config.model_type.
if max_source_length is None:
lowerCamelCase = tokenizer.model_max_length
if prefix is None:
lowerCamelCase = prefix or getattr(model.config , """prefix""" , """""" ) or """"""
lowerCamelCase = SeqaSeqDataset(
_snake_case , _snake_case , _snake_case , max_target_length=1024 , type_path=_snake_case , n_obs=_snake_case , prefix=_snake_case , **_snake_case , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
lowerCamelCase = ds.make_sortish_sampler(_snake_case , distributed=_snake_case , add_extra_examples=_snake_case , shuffle=_snake_case )
lowerCamelCase = DataLoader(_snake_case , sampler=_snake_case , batch_size=_snake_case , collate_fn=ds.collate_fn )
lowerCamelCase = []
for batch in tqdm(_snake_case ):
lowerCamelCase = model.generate(
input_ids=batch["""input_ids"""].to(model.device ) , attention_mask=batch["""attention_mask"""].to(model.device ) , num_return_sequences=_snake_case , num_beams=_snake_case , **_snake_case , )
lowerCamelCase = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case , clean_up_tokenization_spaces=_snake_case )
lowerCamelCase = batch["""ids"""]
if num_return_sequences > 1:
lowerCamelCase = chunks(_snake_case , _snake_case ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(_snake_case ):
results.append({"""pred""": pred, """id""": ids[i].item()} )
save_json(_snake_case , _snake_case )
return results, sampler.num_replicas
def __lowerCamelCase ( ):
'''simple docstring'''
lowerCamelCase = argparse.ArgumentParser(
epilog="""Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate""" )
parser.add_argument("""--data_dir""" , type=_snake_case , help="""like cnn_dm/test.source""" )
parser.add_argument(
"""--model_name""" , type=_snake_case , help="""like facebook/bart-large-cnn,t5-base, etc.""" , default="""sshleifer/distilbart-xsum-12-3""" , )
parser.add_argument("""--save_dir""" , type=_snake_case , help="""where to save""" , default="""tmp_gen""" )
parser.add_argument("""--max_source_length""" , type=_snake_case , default=_snake_case )
parser.add_argument(
"""--type_path""" , type=_snake_case , default="""test""" , help="""which subset to evaluate typically train/val/test""" )
parser.add_argument("""--task""" , type=_snake_case , default="""summarization""" , help="""used for task_specific_params + metrics""" )
parser.add_argument("""--bs""" , type=_snake_case , default=8 , required=_snake_case , help="""batch size""" )
parser.add_argument(
"""--local_rank""" , type=_snake_case , default=-1 , required=_snake_case , help="""should be passed by distributed.launch""" )
parser.add_argument(
"""--n_obs""" , type=_snake_case , default=_snake_case , required=_snake_case , help="""How many observations. Defaults to all.""" )
parser.add_argument(
"""--num_return_sequences""" , type=_snake_case , default=1 , required=_snake_case , help="""How many sequences to return""" )
parser.add_argument(
"""--sync_timeout""" , type=_snake_case , default=600 , required=_snake_case , help="""How long should master process wait for other processes to finish.""" , )
parser.add_argument("""--src_lang""" , type=_snake_case , default=_snake_case , required=_snake_case )
parser.add_argument("""--tgt_lang""" , type=_snake_case , default=_snake_case , required=_snake_case )
parser.add_argument(
"""--prefix""" , type=_snake_case , required=_snake_case , default=_snake_case , help="""will be added to the begininng of src examples""" )
parser.add_argument("""--fp16""" , action="""store_true""" )
parser.add_argument("""--debug""" , action="""store_true""" )
lowerCamelCase = time.time()
lowerCamelCase , lowerCamelCase = parser.parse_known_args()
lowerCamelCase = parse_numeric_n_bool_cl_kwargs(_snake_case )
if generate_kwargs and args.local_rank <= 0:
print(f'parsed the following generate kwargs: {generate_kwargs}' )
lowerCamelCase = Path(args.save_dir + """_tmp""" )
Path(_snake_case ).mkdir(exist_ok=_snake_case ) # this handles locking.
lowerCamelCase = list(json_save_dir.glob("""rank_*.json""" ) )
if intermediate_files:
raise ValueError(f'Found files at {json_save_dir} please move or remove them.' )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
lowerCamelCase = {}
if args.src_lang is not None:
lowerCamelCase = args.src_lang
if args.tgt_lang is not None:
lowerCamelCase = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=_snake_case )
lowerCamelCase , lowerCamelCase = eval_data_dir(
args.data_dir , _snake_case , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=_snake_case , **_snake_case , )
if args.local_rank <= 0:
lowerCamelCase = Path(args.save_dir )
save_dir.mkdir(exist_ok=_snake_case )
lowerCamelCase = gather_results_from_each_node(_snake_case , _snake_case , args.sync_timeout )
lowerCamelCase = combine_partial_results(_snake_case )
if args.num_return_sequences > 1:
lowerCamelCase = save_dir.joinpath("""pseudolabel_results.json""" )
print(f'Saving aggregated results at {save_path}, intermediate in {json_save_dir}/' )
save_json(_snake_case , _snake_case )
return
lowerCamelCase = Path(args.data_dir ).joinpath(args.type_path + """.target""" )
with open(_snake_case ) as f:
lowerCamelCase = [x.rstrip() for x in f.readlines()][: len(_snake_case )]
# Calculate metrics, save metrics, and save _generations.txt
lowerCamelCase = """translation""" in args.task
lowerCamelCase = calculate_bleu if calc_bleu else calculate_rouge
lowerCamelCase = """bleu""" if calc_bleu else """rouge"""
lowerCamelCase = score_fn(_snake_case , _snake_case )
lowerCamelCase = len(_snake_case )
lowerCamelCase = time.time() - start_time
lowerCamelCase = round(runtime / metrics["""n_obs"""] , 4 )
lowerCamelCase = num_replicas
# TODO(@stas00): add whatever metadata to metrics
lowerCamelCase = save_dir.joinpath(f'{args.type_path}_{metric_name}.json' )
save_json(_snake_case , _snake_case , indent=_snake_case )
print(_snake_case )
write_txt_file(_snake_case , save_dir.joinpath(f'{args.type_path}_generations.txt' ) )
if args.debug:
write_txt_file(_snake_case , save_dir.joinpath(f'{args.type_path}.target' ) )
else:
shutil.rmtree(_snake_case )
def __lowerCamelCase ( lowerCamelCase__ : Optional[Any] ):
'''simple docstring'''
lowerCamelCase = []
for partial_result in partial_results:
records.extend(_snake_case )
lowerCamelCase = sorted(_snake_case , key=lambda lowerCamelCase__ : x["id"] )
lowerCamelCase = [x["""pred"""] for x in records]
return preds
def __lowerCamelCase ( lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[int] ):
'''simple docstring'''
lowerCamelCase = time.time()
logger.info("""waiting for all nodes to finish""" )
lowerCamelCase = None
while (time.time() - start_wait) < timeout:
lowerCamelCase = list(save_dir.glob("""rank_*.json""" ) )
if len(_snake_case ) < num_replicas:
continue
try:
# make sure all json files are fully saved
lowerCamelCase = lmap(_snake_case , _snake_case )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError("""Rank 0 gave up on waiting for other processes""" )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
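
# A hedged sketch of the `chunks` regrouping used when num_return_sequences > 1 above
# (a plain re-implementation, since utils.chunks itself is not shown here):
def chunks_sketch(seq, n):
    # split a flat list into consecutive sublists of length n
    return [seq[i : i + n] for i in range(0, len(seq), n)]

# With two inputs and num_return_sequences=3, six decoded strings regroup into one
# candidate list per input example:
assert chunks_sketch(["a1", "a2", "a3", "b1", "b2", "b3"], 3) == [
    ["a1", "a2", "a3"], ["b1", "b2", "b3"]
]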
| 252 |
"""simple docstring"""
import flax.linen as nn
import jax.numpy as jnp

from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D


class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
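
# Hedged usage sketch for the cross-attention down block above. All shapes are
# illustrative assumptions (Flax UNet blocks take NHWC feature maps, a time
# embedding, and encoder states); this is not a fixed API contract.
import jax

block = FlaxCrossAttnDownBlock2D(in_channels=32, out_channels=64, num_attention_heads=4)
rng = jax.random.PRNGKey(0)
sample = jnp.ones((1, 16, 16, 32))              # (batch, height, width, channels)
temb = jnp.ones((1, 128))                        # time-step embedding
encoder_hidden_states = jnp.ones((1, 77, 768))   # e.g. text encoder output
params = block.init(rng, sample, temb, encoder_hidden_states)
hidden_states, skip_states = block.apply(params, sample, temb, encoder_hidden_states)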
| 315 | 0 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"huggingface/time-series-transformer-tourism-monthly": (
"https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
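
# Quick usage check for the config (standard transformers API). The values
# below are arbitrary illustrations, not recommended settings.
from transformers import TimeSeriesTransformerConfig, TimeSeriesTransformerModel

config = TimeSeriesTransformerConfig(prediction_length=24, context_length=48, num_time_features=2)
model = TimeSeriesTransformerModel(config)
print(config.feature_size, model.config.d_model)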
| 10 |
"""simple docstring"""
import numpy


class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        self.input_array = input_array

        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.

        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )

        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )

        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        # Input values for which the prediction is to be made.
        self.array = input_arr

        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    return (value) * (1 - (value))


def example() -> int:
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    test_output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=test_output)

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=test_output, iterations=10, give_loss=False)

    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


if __name__ == "__main__":
    example()
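
# Sanity check (a sketch, not part of the original module): the analytic
# sigmoid_derivative, which takes the activation value, should match a central
# finite difference on sigmoid itself.
x = numpy.array([0.0, 1.0, -2.0])
eps = 1e-6
numeric = (sigmoid(x + eps) - sigmoid(x - eps)) / (2 * eps)
analytic = sigmoid_derivative(sigmoid(x))
assert numpy.allclose(numeric, analytic, atol=1e-6)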
| 315 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
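
# Illustration of the lazy pattern above: importing the package is cheap, and
# the heavy modeling modules are only imported on first attribute access.
import importlib

blenderbot = importlib.import_module("transformers.models.blenderbot")
tokenizer_cls = blenderbot.BlenderbotTokenizer  # triggers the actual submodule import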
| 146 |
"""simple docstring"""
from __future__ import annotations

from collections.abc import Iterator
from typing import Generic, TypeVar

T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    """Stack backed by a singly linked list."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None


if __name__ == "__main__":
    from doctest import testmod

    testmod()
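
# Example usage of the stack above:
stack = LinkedStack[int]()
stack.push(1)
stack.push(2)
print(stack)        # 2->1
print(stack.pop())  # 2
print(len(stack))   # 1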
| 315 | 0 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
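
# Hedged usage sketch: aligning the template with a concrete ClassLabel feature.
from datasets import ClassLabel, Features, Value

demo_features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
task = TextClassification(text_column="text", label_column="labels")
aligned = task.align_with_features(demo_features)
print(aligned.label_schema["labels"].names)  # ['neg', 'pos']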
| 108 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import torch
from transformers import AutoModel


class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """Score start/end positions in a query against support examples whose
        entity boundaries are marked by special start/end tokens."""
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
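
# Hypothetical smoke test with hand-built tensors shaped like tokenizer output
# (in practice the inputs come from the project's FSNER tokenizer utilities;
# instantiating the model downloads the default checkpoint):
model = FSNERModel()
W_query = {"input_ids": torch.randint(5, 1000, (1, 12)), "attention_mask": torch.ones(1, 12, dtype=torch.long)}
support_ids = torch.randint(5, 1000, (2, 12))
support_ids[:, 3] = 1  # pretend id 1 marks entity starts
support_ids[:, 5] = 2  # pretend id 2 marks entity ends
W_supports = {
    "input_ids": support_ids,
    "attention_mask": torch.ones(2, 12, dtype=torch.long),
    "sizes": torch.tensor([2]),
    "start_token_id": torch.tensor(1),
    "end_token_id": torch.tensor(2),
}
p_starts, p_ends = model(W_query, W_supports)
print(p_starts.shape, p_ends.shape)  # per-query-token probabilities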
| 130 |
"""simple docstring"""
from __future__ import annotations

import collections
import pprint
from pathlib import Path


def signature(word: str) -> str:
    """Return a sorted-character signature for a word."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every known anagram of the given word."""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
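
# Minimal illustration of the signature-bucketing idea with an inline word list
# (hypothetical words; the module above reads them from words.txt):
demo_words = ["listen", "silent", "enlist", "google"]
buckets = collections.defaultdict(list)
for w in demo_words:
    buckets["".join(sorted(w))].append(w)
print(buckets["eilnst"])  # ['listen', 'silent', 'enlist']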
| 315 | 0 |
class Graph:
    def __init__(self):
        self.vertex = {}

    # for printing the graph vertices
    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    # for adding the edge between two vertices
    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        # check if vertex is already present
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)

        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark start vertex as visited
        visited[start_vertex] = True

        print(start_vertex, end=" ")

        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)


if __name__ == "__main__":
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)

    g.print_graph()
    print("DFS:")
    g.dfs()

    # OUTPUT:
    # 0 -> 1 -> 2
    # 1 -> 2
    # 2 -> 0 -> 3
    # 3 -> 3
    # DFS:
    # 0 1 2 3
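
# For comparison, a non-recursive variant of the same traversal using an
# explicit stack (a sketch, not part of the original module):
def dfs_iterative(graph: Graph, start: int) -> None:
    visited = [False] * len(graph.vertex)
    stack = [start]
    while stack:
        vertex = stack.pop()
        if not visited[vertex]:
            visited[vertex] = True
            print(vertex, end=" ")
            # push neighbours; reversed keeps the order close to the recursive version
            stack.extend(reversed(graph.vertex.get(vertex, [])))

# dfs_iterative(g, 0)  # -> 0 1 2 3 when run after the demo above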
| 52 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )
@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Load pretrained model and image processor
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # adapt config
    config.update(
        {
            "mask_ratio": model_args.mask_ratio,
            "norm_pix_loss": model_args.norm_pix_loss,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    def preprocess_images(examples):
        """Preprocess a batch of images by applying transforms."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
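
# Hypothetical programmatic invocation (a sketch): the same flags the CLI parser
# expects can be injected via sys.argv before calling main(). Running this will
# download cifar10 and start an actual training run.
import sys

sys.argv = [
    "run_mae.py",
    "--dataset_name", "cifar10",
    "--output_dir", "./vit-mae-demo",
    "--do_train",
    "--base_learning_rate", "1.5e-4",
]
main()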
| 315 | 0 |
"""simple docstring"""
import re
import time
from typing import Optional

import IPython.display as disp

from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length


def format_time(t):
    "Format `t` (in seconds) to (h):mm:ss"
    t = int(t)
    h, m, s = t // 3_600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"


def html_progress_bar(value, total, prefix, label, width=300):
    # docstyle-ignore
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """


def text_to_html_table(items):
    "Put the texts in `items` in an HTML table."
    html_code = """<table border="1" class="dataframe">\n"""
    html_code += """  <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"      <th>{i}</th>\n"
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code


class NotebookProgressBar:
    warmup = 5
    update_every = 0.2

    def __init__(
        self,
        total: int,
        prefix: Optional[str] = None,
        leave: bool = True,
        parent: Optional["NotebookTrainingTracker"] = None,
        width: int = 300,
    ):
        self.total = total
        self.prefix = "" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None

    def update(self, value: int, force_update: bool = False, comment: str = None):
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)

    def update_bar(self, value, comment=None):
        spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = f"[{spaced_value}/{self.total} : < :"
        elif self.predicted_remaining is None:
            self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
        else:
            self.label = (
                f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
                f" {format_time(self.predicted_remaining)}"
            )
            self.label += f", {1/self.average_time_per_item:.2f} it/s"
        self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
        self.display()

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def close(self):
        "Closes the progress bar."
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(""))


class NotebookTrainingTracker(NotebookProgressBar):
    def __init__(self, num_steps, column_names=None):
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values):
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total, prefix=None, width=300):
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self):
        self.child_bar = None
        self.display()


class NotebookProgressCallback(TrainerCallback):
    def __init__(self):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False

    def on_train_begin(self, args, state, control, **kwargs):
        self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["Training Loss"]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("Validation Loss")
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)

    def on_step_end(self, args, state, control, **kwargs):
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
        self.training_tracker.update(
            state.global_step + 1,
            comment=f"Epoch {epoch}/{state.num_train_epochs}",
            force_update=self._force_next_update,
        )
        self._force_next_update = False

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)

    def on_predict(self, args, state, control, **kwargs):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None

    def on_log(self, args, state, control, logs=None, **kwargs):
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"Training Loss": logs["loss"]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values["Step"] = state.global_step
            self.training_tracker.write_line(values)

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        if self.training_tracker is not None:
            values = {"Training Loss": "No log", "Validation Loss": "No log"}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values["Training Loss"] = log["loss"]
                    break
            if self.first_column == "Epoch":
                values["Epoch"] = int(state.epoch)
            else:
                values["Step"] = state.global_step
            metric_key_prefix = "eval"
            for k in metrics:
                if k.endswith("_loss"):
                    metric_key_prefix = re.sub(r"\_loss$", "", k)
            _ = metrics.pop("total_flos", None)
            _ = metrics.pop("epoch", None)
            _ = metrics.pop(f"{metric_key_prefix}_runtime", None)
            _ = metrics.pop(f"{metric_key_prefix}_samples_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_steps_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_jit_compilation_time", None)
            for k, v in metrics.items():
                if k == f"{metric_key_prefix}_loss":
                    values["Validation Loss"] = v
                else:
                    splits = k.split("_")
                    name = " ".join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True

    def on_train_end(self, args, state, control, **kwargs):
        self.training_tracker.update(
            state.global_step,
            comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}",
            force_update=True,
        )
        self.training_tracker = None
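
# Quick demo of the progress bar on its own (a sketch; run inside Jupyter so
# the HTML display handle actually renders and updates):
import time as _time

pbar = NotebookProgressBar(total=100, prefix="Demo")
for step in range(100):
    _time.sleep(0.01)
    pbar.update(step + 1, comment=f"step {step + 1}")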
| 243 |
"""simple docstring"""
import logging
import re

import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor


logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names


def add_arguments(parser):
    """Add arguments to parser for functions defined in quant_trainer."""
    group = parser.add_argument_group("quant_trainer arguments")
    group.add_argument("--wprec", type=int, default=8, help="weight precision")
    group.add_argument("--aprec", type=int, default=8, help="activation precision")
    group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling")
    group.add_argument("--quant-disable", action="store_true", help="disable all quantizers")
    group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers")
    group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword")
    group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.")
    group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer")
    group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use")
    group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator")
    group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv")
    group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N")
    group.add_argument(
        "--recalibrate-weights",
        action="store_true",
        help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ),
    )


def set_default_quantizers(args):
    """Set default quantizers before creating the model."""
    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}")

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)


def configure_model(model, args, calib=False, eval=False):
    """Function called before the training loop."""
    logger.info("Configuring Model for Quantization")
    logger.info(f"using quantization package {pytorch_quantization.__file__}")

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)
        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)
        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)
        if args.recalibrate_weights:
            recalibrate_weights(model)
        if args.fuse_qkv:
            fuse_qkv(model, args)
        if args.clip_gelu:
            clip_gelu(model, args.clip_gelu)
        # if args.local_rank in [-1, 0] and not calib:
        print_quant_summary(model)


def enable_calibration(model):
    """Enable calibration of all *_input_quantizer modules in model."""
    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"{name:80}: {module}")


def finish_calibration(model, args):
    """Disable calibration and load amax for all "*_quantizer" modules in model."""
    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model)


def fuse_qkv(model, args):
    """Use the same scale factor for the Q, K and V projections of each attention block."""

    def fusea(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()

        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")

    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fusea(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fusea(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)


def clip_gelu(model, maxval):
    """Clip activations generated by GELU to maxval when quantized."""
    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")


def expand_amax(model):
    """Expand per-tensor amax to be per channel, where each channel is assigned the per-tensor amax."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}")


def recalibrate_weights(model):
    """Perform max calibration on the weights and update amax."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            if not hasattr(mod._weight_quantizer, "_amax"):
                print(f"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
                continue

            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax


def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Print model quantization configuration."""
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]

    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        name_width = max(name_width, len(name))

    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        s = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(s) <= line_width:
            logger.info(s)
        else:
            logger.info(f"{name:{name_width}} {act_str}")
            logger.info(f'{" ":{name_width}} {wgt_str}')


def print_quant_summary(model):
    """Print summary of all quantizer modules in the model."""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
    print(f"{count} TensorQuantizers found in model")


def set_quantizer(name, mod, quantizer, k, v):
    """Set attributes for mod.quantizer."""
    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f"{name} has no {quantizer}")


def set_quantizers(name, mod, which="both", **kwargs):
    """Set quantizer attributes for mod."""
    s = f"Warning: changing {which} quantizers of {name:{qname_width}}"
    for k, v in kwargs.items():
        s += f" {k}={v}"
        if which in ["input", "both"]:
            set_quantizer(name, mod, "_input_quantizer", k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, "_weight_quantizer", k, v)
    logger.info(s)


def set_quantizer_by_name(model, names, **kwargs):
    """Set quantizer attributes for layers whose name contains a substring in names."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s)
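
# Hypothetical end-to-end wiring (a sketch; assumes pytorch-quantization is
# installed and that `model` would be a QDQBERT-style model containing
# TensorQuantizer modules):
import argparse

qt_parser = argparse.ArgumentParser()
add_arguments(qt_parser)
qt_args = qt_parser.parse_args(["--aprec", "8", "--wprec", "8", "--calibrator", "max"])

set_default_quantizers(qt_args)      # must run before the model is created
# model = ...                        # create the quantized model here
# configure_model(model, qt_args)    # then apply the per-layer switches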
| 315 | 0 |
import operator as op
_snake_case = "scaler.pt"
_snake_case = "pytorch_model"
_snake_case = "random_states"
_snake_case = "optimizer"
_snake_case = "scheduler"
_snake_case = "pytorch_model.bin"
_snake_case = "pytorch_model.bin.index.json"
_snake_case = "model.safetensors"
_snake_case = "model.safetensors.index.json"
_snake_case = "1.10.2"
_snake_case = "py38"
_snake_case = "4.17.0"
_snake_case = ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"]
_snake_case = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"]
_snake_case = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
_snake_case = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
_snake_case = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
_snake_case = "2.0.1"
_snake_case = ["pdsh", "standard", "openmpi", "mvapich"]
_snake_case = ["default", "reduce-overhead", "max-autotune"]
_snake_case = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
_snake_case = [
"nnodes",
"nproc_per_node",
"rdzv_backend",
"rdzv_endpoint",
"rdzv_id",
"rdzv_conf",
"standalone",
"max_restarts",
"monitor_interval",
"start_method",
"role",
"module",
"m",
"no_python",
"run_path",
"log_dir",
"r",
"redirects",
"t",
"tee",
"node_rank",
"master_addr",
"master_port",
]
_snake_case = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"]
_snake_case = ["DEEPSPEED", "MULTI_XPU", "FSDP"]
| 26 |
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
_CITATION = r'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr ( datasets.Metric ):
'''simple docstring'''
    def _info( self : Optional[int] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , )
    def _compute( self , predictions , references , return_pvalue=False ):
        results = spearmanr(references , predictions )
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 315 | 0 |
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}
def digits_fifth_powers_sum(number: int) -> int:
    """simple docstring"""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))
def solution() -> int:
    """simple docstring"""
    return sum(
        number
        for number in range(1000 , 100_0000)
        if number == digits_fifth_powers_sum(number))
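# 100_0000 is a safe upper bound: seven digits contribute at most 7 * 9**5 = 413_343,
# which is smaller than any 7-digit number, so no larger solutions exist.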
if __name__ == "__main__":
print(solution())
| 170 |
"""simple docstring"""
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    '''simple docstring'''
    start = a
    end = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError('could not find root in given interval.')
    else:
        mid = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid
def f(x: float) -> float:
    '''simple docstring'''
    return x**3 - 2 * x - 5
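# f(x) = x**3 - 2x - 5 has a single real root near x ≈ 2.0946; bisection on [1, 1000]
# converges to it since f(1) < 0 < f(1000).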
if __name__ == "__main__":
print(bisection(f, 1, 1_000))
import doctest
doctest.testmod()
| 315 | 0 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
UpperCAmelCase = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
@dataclass
class lowerCAmelCase :
lowerCAmelCase_ = field(
default="cifar10" , metadata={"help": "Name of a dataset from the datasets package"} )
lowerCAmelCase_ = field(
default=__lowerCAmelCase , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
lowerCAmelCase_ = field(
default=__lowerCAmelCase , metadata={"help": "The column name of the images in the files."} )
lowerCAmelCase_ = field(default=__lowerCAmelCase , metadata={"help": "A folder containing the training data."} )
lowerCAmelCase_ = field(default=__lowerCAmelCase , metadata={"help": "A folder containing the validation data."} )
lowerCAmelCase_ = field(
default=0.1_5 , metadata={"help": "Percent to split off of train for validation."} )
lowerCAmelCase_ = field(
default=__lowerCAmelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
lowerCAmelCase_ = field(
default=__lowerCAmelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def snake_case ( self : Dict ):
"""simple docstring"""
__lowercase ={}
if self.train_dir is not None:
__lowercase =self.train_dir
if self.validation_dir is not None:
__lowercase =self.validation_dir
__lowercase =data_files if data_files else None
@dataclass
class lowerCAmelCase :
lowerCAmelCase_ = field(
default=__lowerCAmelCase , metadata={
"help": (
"The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch."
)
} , )
lowerCAmelCase_ = field(
default=__lowerCAmelCase , metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"} )
lowerCAmelCase_ = field(
default=__lowerCAmelCase , metadata={
"help": (
"Override some existing default config settings when a model is trained from scratch. Example: "
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
)
} , )
lowerCAmelCase_ = field(
default=__lowerCAmelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} )
lowerCAmelCase_ = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
lowerCAmelCase_ = field(default=__lowerCAmelCase , metadata={"help": "Name or path of preprocessor config."} )
lowerCAmelCase_ = field(
default=__lowerCAmelCase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
lowerCAmelCase_ = field(
default=0.7_5 , metadata={"help": "The ratio of the number of masked tokens in the input sequence."} )
lowerCAmelCase_ = field(
default=__lowerCAmelCase , metadata={"help": "Whether or not to train with normalized pixel values as target."} )
@dataclass
class lowerCAmelCase ( __lowerCAmelCase ):
lowerCAmelCase_ = field(
default=1E-3 , metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."} )
def collate_fn(examples ):
    '''simple docstring'''
    pixel_values = torch.stack([example['pixel_values'] for example in examples] )
    return {"pixel_values": pixel_values}
def main():
'''simple docstring'''
__lowercase =HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowercase , __lowercase , __lowercase =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowercase , __lowercase , __lowercase =parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_mae', _snake_case, _snake_case )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout )], )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__lowercase =training_args.get_process_log_level()
logger.setLevel(_snake_case )
transformers.utils.logging.set_verbosity(_snake_case )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
__lowercase =None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowercase =get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset.
__lowercase =load_dataset(
data_args.dataset_name, data_args.dataset_config_name, data_files=data_args.data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
# If we don't have a validation split, split off a percentage of train as validation.
__lowercase =None if 'validation' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split, _snake_case ) and data_args.train_val_split > 0.0:
__lowercase =ds['train'].train_test_split(data_args.train_val_split )
__lowercase =split['train']
__lowercase =split['test']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowercase ={
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
__lowercase =ViTMAEConfig.from_pretrained(model_args.config_name, **_snake_case )
elif model_args.model_name_or_path:
__lowercase =ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **_snake_case )
else:
__lowercase =ViTMAEConfig()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(F'''New config: {config}''' )
# adapt config
config.update(
{
'mask_ratio': model_args.mask_ratio,
'norm_pix_loss': model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
__lowercase =ViTImageProcessor.from_pretrained(model_args.image_processor_name, **_snake_case )
elif model_args.model_name_or_path:
__lowercase =ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **_snake_case )
else:
__lowercase =ViTImageProcessor()
# create model
if model_args.model_name_or_path:
__lowercase =ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path ), config=_snake_case, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
else:
logger.info('Training new model from scratch' )
__lowercase =ViTMAEForPreTraining(_snake_case )
if training_args.do_train:
__lowercase =ds['train'].column_names
else:
__lowercase =ds['validation'].column_names
if data_args.image_column_name is not None:
__lowercase =data_args.image_column_name
elif "image" in column_names:
__lowercase ='image'
elif "img" in column_names:
__lowercase ='img'
else:
__lowercase =column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
__lowercase =image_processor.size['shortest_edge']
else:
__lowercase =(image_processor.size['height'], image_processor.size['width'])
__lowercase =Compose(
[
            Lambda(lambda img : img.convert('RGB' ) if img.mode != "RGB" else img ),
RandomResizedCrop(_snake_case, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean, std=image_processor.image_std ),
] )
    def preprocess_images(examples ):
        examples['pixel_values'] = [transforms(image ) for image in examples[image_column_name]]
        return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
__lowercase =ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(_snake_case )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
__lowercase =(
ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(_snake_case )
# Compute absolute learning rate
__lowercase =(
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
__lowercase =training_args.base_learning_rate * total_train_batch_size / 2_56
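        # linear lr scaling rule from the MAE paper: absolute_lr = base_lr * total_batch_size / 256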
# Initialize our trainer
__lowercase =Trainer(
model=_snake_case, args=_snake_case, train_dataset=ds['train'] if training_args.do_train else None, eval_dataset=ds['validation'] if training_args.do_eval else None, tokenizer=_snake_case, data_collator=_snake_case, )
# Training
if training_args.do_train:
__lowercase =None
if training_args.resume_from_checkpoint is not None:
__lowercase =training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__lowercase =last_checkpoint
__lowercase =trainer.train(resume_from_checkpoint=_snake_case )
trainer.save_model()
trainer.log_metrics('train', train_result.metrics )
trainer.save_metrics('train', train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
__lowercase =trainer.evaluate()
trainer.log_metrics('eval', _snake_case )
trainer.save_metrics('eval', _snake_case )
# Write model card and (optionally) push to hub
__lowercase ={
'tasks': 'masked-auto-encoding',
'dataset': data_args.dataset_name,
'tags': ['masked-auto-encoding'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_snake_case )
else:
trainer.create_model_card(**_snake_case )
def _mp_fn(index ):
    '''simple docstring'''
    main()
if __name__ == "__main__":
main()
| 141 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ) , '''Tatoeba directory does not exist.''' )
class TatoebaConversionTester( unittest.TestCase ):
    '''simple docstring'''
    @cached_property
    def resolver( self ):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir )
    @slow
    def test_resolver( self ):
        self.resolver.convert_models(['heb-eng'] )
    @slow
    def test_model_card( self ):
        content , mmeta = self.resolver.write_model_card('opus-mt-he-en' , dry_run=True )
        assert mmeta["long_pair"] == "heb-eng"
| 315 | 0 |
from collections import deque
from math import floor
from random import random
from time import time
class _A :
def __init__( self : int ) -> List[Any]:
"""simple docstring"""
        self.graph = {}
def __a ( self : int , _A : str , _A : int , _A : Optional[Any]=1 ) -> List[str]:
"""simple docstring"""
if self.graph.get(_UpperCAmelCase ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
lowercase : int = [[w, v]]
if not self.graph.get(_UpperCAmelCase ):
lowercase : List[Any] = []
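        # self.graph is a weighted adjacency list: each entry of self.graph[u] is a
        # [weight, neighbour] pair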
def __a ( self : Any ) -> int:
"""simple docstring"""
return list(self.graph )
def __a ( self : Dict , _A : Union[str, Any] , _A : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
if self.graph.get(_UpperCAmelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(_UpperCAmelCase )
def __a ( self : Tuple , _A : Dict=-2 , _A : Optional[int]=-1 ) -> Optional[Any]:
"""simple docstring"""
if s == d:
return []
lowercase : int = []
lowercase : Union[str, Any] = []
if s == -2:
lowercase : Optional[Any] = list(self.graph )[0]
stack.append(_UpperCAmelCase )
visited.append(_UpperCAmelCase )
lowercase : Any = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowercase : Any = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(_UpperCAmelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
lowercase : Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(_UpperCAmelCase ) != 0:
lowercase : List[Any] = stack[len(_UpperCAmelCase ) - 1]
else:
lowercase : Dict = ss
# check if se have reached the starting point
if len(_UpperCAmelCase ) == 0:
return visited
def __a ( self : int , _A : Tuple=-1 ) -> List[str]:
"""simple docstring"""
if c == -1:
lowercase : Any = floor(random() * 10_000 ) + 10
for i in range(_UpperCAmelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
lowercase : Any = floor(random() * c ) + 1
if n != i:
self.add_pair(_UpperCAmelCase , _UpperCAmelCase , 1 )
def __a ( self : int , _A : Dict=-2 ) -> Dict:
"""simple docstring"""
lowercase : Any = deque()
lowercase : Dict = []
if s == -2:
lowercase : int = list(self.graph )[0]
d.append(_UpperCAmelCase )
visited.append(_UpperCAmelCase )
while d:
lowercase : Any = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def __a ( self : Tuple , _A : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase : Dict = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def __a ( self : Dict , _A : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return len(self.graph[u] )
def __a ( self : Any , _A : int=-2 ) -> Optional[Any]:
"""simple docstring"""
lowercase : List[Any] = []
lowercase : List[str] = []
if s == -2:
lowercase : Any = list(self.graph )[0]
stack.append(_UpperCAmelCase )
visited.append(_UpperCAmelCase )
lowercase : Any = s
lowercase : int = []
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowercase : Optional[Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(_UpperCAmelCase ) != 0:
lowercase : Dict = stack[len(_UpperCAmelCase ) - 1]
else:
lowercase : List[str] = ss
# check if se have reached the starting point
if len(_UpperCAmelCase ) == 0:
return sorted_nodes
def __a ( self : str ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Union[str, Any] = []
lowercase : Any = []
lowercase : Optional[Any] = list(self.graph )[0]
stack.append(_UpperCAmelCase )
visited.append(_UpperCAmelCase )
lowercase : int = -2
lowercase : Tuple = []
lowercase : List[str] = s
lowercase : List[str] = False
lowercase : List[Any] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowercase : int = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowercase : Any = len(_UpperCAmelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase : Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowercase : List[Any] = True
if len(_UpperCAmelCase ) != 0:
lowercase : List[Any] = stack[len(_UpperCAmelCase ) - 1]
else:
lowercase : str = False
indirect_parents.append(_UpperCAmelCase )
lowercase : Optional[Any] = s
lowercase : Optional[int] = ss
# check if se have reached the starting point
if len(_UpperCAmelCase ) == 0:
return list(_UpperCAmelCase )
def __a ( self : str ) -> List[Any]:
"""simple docstring"""
lowercase : str = []
lowercase : List[str] = []
lowercase : List[Any] = list(self.graph )[0]
stack.append(_UpperCAmelCase )
visited.append(_UpperCAmelCase )
lowercase : Dict = -2
lowercase : Any = []
lowercase : Optional[int] = s
lowercase : Optional[Any] = False
lowercase : str = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowercase : List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowercase : Optional[int] = len(_UpperCAmelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowercase : Optional[Any] = True
if len(_UpperCAmelCase ) != 0:
lowercase : int = stack[len(_UpperCAmelCase ) - 1]
else:
lowercase : Dict = False
indirect_parents.append(_UpperCAmelCase )
lowercase : str = s
lowercase : Optional[Any] = ss
# check if se have reached the starting point
if len(_UpperCAmelCase ) == 0:
return False
def __a ( self : Optional[Any] , _A : str=-2 , _A : Union[str, Any]=-1 ) -> Any:
"""simple docstring"""
lowercase : str = time()
self.dfs(_UpperCAmelCase , _UpperCAmelCase )
lowercase : Any = time()
return end - begin
def __a ( self : Union[str, Any] , _A : List[Any]=-2 ) -> Dict:
"""simple docstring"""
lowercase : List[str] = time()
self.bfs(_UpperCAmelCase )
lowercase : Dict = time()
return end - begin
class _A :
def __init__( self : str ) -> List[str]:
"""simple docstring"""
        self.graph = {}
def __a ( self : List[Any] , _A : str , _A : int , _A : Optional[int]=1 ) -> str:
"""simple docstring"""
if self.graph.get(_UpperCAmelCase ):
# if there already is a edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
lowercase : List[Any] = [[w, v]]
# add the other way
if self.graph.get(_UpperCAmelCase ):
# if there already is a edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if u does not exist
lowercase : Tuple = [[w, u]]
def __a ( self : Optional[int] , _A : Union[str, Any] , _A : Union[str, Any] ) -> Tuple:
"""simple docstring"""
if self.graph.get(_UpperCAmelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(_UpperCAmelCase )
# the other way round
if self.graph.get(_UpperCAmelCase ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(_UpperCAmelCase )
def __a ( self : Optional[int] , _A : Optional[int]=-2 , _A : str=-1 ) -> Optional[int]:
"""simple docstring"""
if s == d:
return []
lowercase : Optional[int] = []
lowercase : int = []
if s == -2:
lowercase : str = list(self.graph )[0]
stack.append(_UpperCAmelCase )
visited.append(_UpperCAmelCase )
lowercase : Dict = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowercase : int = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(_UpperCAmelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
lowercase : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(_UpperCAmelCase ) != 0:
lowercase : Tuple = stack[len(_UpperCAmelCase ) - 1]
else:
lowercase : Union[str, Any] = ss
# check if se have reached the starting point
if len(_UpperCAmelCase ) == 0:
return visited
def __a ( self : Optional[Any] , _A : str=-1 ) -> Tuple:
"""simple docstring"""
if c == -1:
lowercase : Any = floor(random() * 10_000 ) + 10
for i in range(_UpperCAmelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
lowercase : Union[str, Any] = floor(random() * c ) + 1
if n != i:
self.add_pair(_UpperCAmelCase , _UpperCAmelCase , 1 )
def __a ( self : Dict , _A : List[str]=-2 ) -> Any:
"""simple docstring"""
lowercase : List[str] = deque()
lowercase : List[Any] = []
if s == -2:
lowercase : Tuple = list(self.graph )[0]
d.append(_UpperCAmelCase )
visited.append(_UpperCAmelCase )
while d:
lowercase : str = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def __a ( self : Union[str, Any] , _A : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return len(self.graph[u] )
def __a ( self : Any ) -> Any:
"""simple docstring"""
lowercase : Optional[Any] = []
lowercase : Any = []
lowercase : Tuple = list(self.graph )[0]
stack.append(_UpperCAmelCase )
visited.append(_UpperCAmelCase )
lowercase : Any = -2
lowercase : Dict = []
lowercase : List[Any] = s
lowercase : Any = False
lowercase : List[str] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowercase : Optional[int] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowercase : Tuple = len(_UpperCAmelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowercase : Dict = True
if len(_UpperCAmelCase ) != 0:
lowercase : Dict = stack[len(_UpperCAmelCase ) - 1]
else:
lowercase : Dict = False
indirect_parents.append(_UpperCAmelCase )
lowercase : Dict = s
lowercase : List[Any] = ss
# check if se have reached the starting point
if len(_UpperCAmelCase ) == 0:
return list(_UpperCAmelCase )
def __a ( self : Tuple ) -> Tuple:
"""simple docstring"""
lowercase : Optional[int] = []
lowercase : List[str] = []
lowercase : Dict = list(self.graph )[0]
stack.append(_UpperCAmelCase )
visited.append(_UpperCAmelCase )
lowercase : List[Any] = -2
lowercase : Optional[Any] = []
lowercase : Any = s
lowercase : Dict = False
lowercase : Union[str, Any] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowercase : Optional[int] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowercase : List[Any] = len(_UpperCAmelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase : Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowercase : Dict = True
if len(_UpperCAmelCase ) != 0:
lowercase : Optional[int] = stack[len(_UpperCAmelCase ) - 1]
else:
lowercase : Optional[int] = False
indirect_parents.append(_UpperCAmelCase )
lowercase : Any = s
lowercase : str = ss
# check if se have reached the starting point
if len(_UpperCAmelCase ) == 0:
return False
def __a ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
return list(self.graph )
def __a ( self : str , _A : Optional[Any]=-2 , _A : int=-1 ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Optional[int] = time()
self.dfs(_UpperCAmelCase , _UpperCAmelCase )
lowercase : List[str] = time()
return end - begin
def __a ( self : Tuple , _A : str=-2 ) -> Dict:
"""simple docstring"""
lowercase : List[str] = time()
self.bfs(_UpperCAmelCase )
lowercase : str = time()
return end - begin
| 308 |
"""simple docstring"""
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
        T5FilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 315 | 0 |
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int):
    '''simple docstring'''
    y, x = position
    positions = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
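    # the eight L-shaped moves a knight can make from (y, x); positions that fall
    # off the board are filtered out below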
    permissible_positions = []
    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position)
    return permissible_positions
def is_complete(board: list[list[int]]):
    '''simple docstring'''
    return not any(elem == 0 for row in board for elem in row)
def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int):
    '''simple docstring'''
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0
    return False
def open_knight_tour(n: int):
    '''simple docstring'''
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f'Open Knight Tour cannot be performed on a board of size {n}'
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 252 |
"""simple docstring"""
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    '''simple docstring'''
    y, x = position
    positions = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
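    # the eight L-shaped moves a knight can make from (y, x); positions that fall
    # off the board are filtered out below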
    permissible_positions = []
    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position)
    return permissible_positions
def is_complete(board: list[list[int]]) -> bool:
    '''simple docstring'''
    return not any(elem == 0 for row in board for elem in row)
def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    '''simple docstring'''
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0
    return False
def open_knight_tour(n: int) -> list[list[int]]:
    '''simple docstring'''
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = F'''Open Knight Tour cannot be performed on a board of size {n}'''
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 315 | 0 |
alphabet_size = 256
# Modulus to hash a string
modulus = 100_0003
def rabin_karp(pattern: str, text: str) -> bool:
    """simple docstring"""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
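# Expected running time is O(len(pattern) + len(text)); it can degrade towards
# O(len(pattern) * len(text)) if many spurious hash collisions force full comparisons.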
def test_rabin_karp() -> None:
    """simple docstring"""
    pattern = "abc1abc12"
    text_1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text_2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text_1) and not rabin_karp(pattern, text_2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
| 10 |
"""simple docstring"""
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer( Trainer ):
'''simple docstring'''
    def __init__( self , *args , eval_examples=None , post_process_function=None , **kwargs ):
        super().__init__(*args , **kwargs )
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
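    # post_process_function turns the model's raw start/end logits into answer
    # spans so that compute_metrics can score them against the references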
    def evaluate( self , eval_dataset=None , eval_examples=None , ignore_keys=None , metric_key_prefix : str = "eval" ):
_A = self.eval_dataset if eval_dataset is None else eval_dataset
_A = self.get_eval_dataloader(_UpperCAmelCase )
_A = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
_A = self.compute_metrics
_A = None
_A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
_A = time.time()
try:
_A = eval_loop(
_UpperCAmelCase , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_UpperCAmelCase , metric_key_prefix=_UpperCAmelCase , )
finally:
_A = compute_metrics
_A = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
_UpperCAmelCase , _UpperCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
_A = self.post_process_function(_UpperCAmelCase , _UpperCAmelCase , output.predictions )
_A = self.compute_metrics(_UpperCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
_A = metrics.pop(_UpperCAmelCase )
metrics.update(output.metrics )
else:
_A = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(_UpperCAmelCase )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
_A = self.callback_handler.on_evaluate(self.args , self.state , self.control , _UpperCAmelCase )
return metrics
    def predict( self , predict_dataset , predict_examples , ignore_keys=None , metric_key_prefix : str = "test" ):
_A = self.get_test_dataloader(_UpperCAmelCase )
# Temporarily disable metric computation, we will do it in the loop here.
_A = self.compute_metrics
_A = None
_A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
_A = time.time()
try:
_A = eval_loop(
_UpperCAmelCase , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_UpperCAmelCase , metric_key_prefix=_UpperCAmelCase , )
finally:
_A = compute_metrics
_A = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
_UpperCAmelCase , _UpperCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
_A = self.post_process_function(_UpperCAmelCase , _UpperCAmelCase , output.predictions , 'predict' )
_A = self.compute_metrics(_UpperCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
_A = metrics.pop(_UpperCAmelCase )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_UpperCAmelCase )
| 315 | 0 |
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
__UpperCamelCase : int = logging.getLogger(__name__)
__UpperCamelCase : str = 50 # max width of layer names
__UpperCamelCase : List[str] = 70 # max width of quantizer names
def _a ( SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
UpperCamelCase__ : List[str] = parser.add_argument_group('''quant_trainer arguments''' )
group.add_argument('''--wprec''' , type=_snake_case , default=8 , help='''weight precision''' )
group.add_argument('''--aprec''' , type=_snake_case , default=8 , help='''activation precision''' )
group.add_argument('''--quant-per-tensor''' , action='''store_true''' , help='''per tensor weight scaling''' )
group.add_argument('''--quant-disable''' , action='''store_true''' , help='''disable all quantizers''' )
group.add_argument('''--quant-disable-embeddings''' , action='''store_true''' , help='''disable all embeddings quantizers''' )
group.add_argument('''--quant-disable-keyword''' , type=_snake_case , nargs='''+''' , help='''disable quantizers by keyword''' )
group.add_argument('''--quant-disable-layer-module''' , type=_snake_case , help='''disable quantizers by keyword under layer.''' )
group.add_argument('''--quant-enable-layer-module''' , type=_snake_case , help='''enable quantizers by keyword under layer''' )
group.add_argument('''--calibrator''' , default='''max''' , help='''which quantization range calibrator to use''' )
group.add_argument('''--percentile''' , default=_snake_case , type=_snake_case , help='''percentile for PercentileCalibrator''' )
group.add_argument('''--fuse-qkv''' , action='''store_true''' , help='''use the same scale factor for qkv''' )
group.add_argument('''--clip-gelu''' , metavar='''N''' , type=_snake_case , help='''clip gelu output maximum value to N''' )
group.add_argument(
'''--recalibrate-weights''' , action='''store_true''' , help=(
'''recalibrate weight amaxes by taking the max of the weights.'''
''' amaxes will be computed with the current quantization granularity (axis).'''
) , )
def _a ( SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
if args.calibrator == "max":
UpperCamelCase__ : Tuple = '''max'''
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError('''Specify --percentile when using percentile calibrator''' )
UpperCamelCase__ : Union[str, Any] = '''histogram'''
elif args.calibrator == "mse":
UpperCamelCase__ : Dict = '''histogram'''
else:
raise ValueError(F"Invalid calibrator {args.calibrator}" )
UpperCamelCase__ : List[Any] = QuantDescriptor(num_bits=args.aprec , calib_method=_snake_case )
UpperCamelCase__ : Union[str, Any] = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(_snake_case )
quant_nn.QuantLinear.set_default_quant_desc_weight(_snake_case )
def _a ( SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Any=False , SCREAMING_SNAKE_CASE : Union[str, Any]=False ):
"""simple docstring"""
logger.info('''Configuring Model for Quantization''' )
logger.info(F"using quantization package {pytorch_quantization.__file__}" )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(_snake_case , ['''embeddings'''] , which='''weight''' , _disabled=_snake_case )
if args.quant_disable:
set_quantizer_by_name(_snake_case , [''''''] , _disabled=_snake_case )
if args.quant_disable_keyword:
set_quantizer_by_name(_snake_case , args.quant_disable_keyword , _disabled=_snake_case )
if args.quant_disable_layer_module:
set_quantizer_by_name(_snake_case , [r'''layer.\d+.''' + args.quant_disable_layer_module] , _disabled=_snake_case )
if args.quant_enable_layer_module:
set_quantizer_by_name(_snake_case , [r'''layer.\d+.''' + args.quant_enable_layer_module] , _disabled=_snake_case )
if args.recalibrate_weights:
recalibrate_weights(_snake_case )
if args.fuse_qkv:
fuse_qkv(_snake_case , _snake_case )
if args.clip_gelu:
clip_gelu(_snake_case , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(_snake_case )
def _a ( SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
logger.info('''Enabling Calibration''' )
for name, module in model.named_modules():
if name.endswith('''_quantizer''' ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F"{name:80}: {module}" )
def _a ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
logger.info('''Loading calibrated amax''' )
for name, module in model.named_modules():
if name.endswith('''_quantizer''' ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax('''percentile''' , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(_snake_case )
def _a ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
def fusea(SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any] ):
for mod in [qq, qk, qv]:
if not hasattr(_snake_case , '''_amax''' ):
print(''' WARNING: NO AMAX BUFFER''' )
return
UpperCamelCase__ : Any = qq._amax.detach().item()
UpperCamelCase__ : Any = qk._amax.detach().item()
UpperCamelCase__ : Optional[Any] = qv._amax.detach().item()
UpperCamelCase__ : List[Any] = max(_snake_case , _snake_case , _snake_case )
qq._amax.fill_(_snake_case )
qk._amax.fill_(_snake_case )
qv._amax.fill_(_snake_case )
logger.info(F" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}" )
for name, mod in model.named_modules():
if name.endswith('''.attention.self''' ):
logger.info(F"FUSE_QKV: {name:{name_width}}" )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
for name, mod in model.named_modules():
if name.endswith('''.output.dense''' ) and not name.endswith('''attention.output.dense''' ):
UpperCamelCase__ : str = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=_snake_case )
UpperCamelCase__ : Union[str, Any] = mod._input_quantizer._amax.data.detach().item()
logger.info(F"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}" )
def _a ( SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
for name, mod in model.named_modules():
if hasattr(_snake_case , '''_weight_quantizer''' ) and mod._weight_quantizer.axis is not None:
UpperCamelCase__ : Any = mod.weight.shape[0]
UpperCamelCase__ : Optional[int] = mod._weight_quantizer._amax.detach()
UpperCamelCase__ : Optional[int] = torch.ones(_snake_case , dtype=amax.dtype , device=amax.device ) * amax
print(F"expanding {name} {amax} -> {mod._weight_quantizer._amax}" )
def _a ( SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
for name, mod in model.named_modules():
if hasattr(_snake_case , '''_weight_quantizer''' ):
            if not hasattr(mod._weight_quantizer , '''_amax''' ):
                print(F"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER" )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
UpperCamelCase__ : List[str] = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
UpperCamelCase__ : Tuple = set(range(len(mod.weight.size() ) ) ) - axis_set
UpperCamelCase__ : Any = pytorch_quantization.utils.reduce_amax(mod.weight , axis=_snake_case , keepdims=_snake_case ).detach()
logger.info(F"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}" )
UpperCamelCase__ : Union[str, Any] = amax
def _a ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[str]=25 , SCREAMING_SNAKE_CASE : str=180 , SCREAMING_SNAKE_CASE : int=None ):
"""simple docstring"""
if ignore is None:
UpperCamelCase__ : Tuple = []
elif not isinstance(_snake_case , _snake_case ):
UpperCamelCase__ : Union[str, Any] = [ignore]
UpperCamelCase__ : Any = 0
for name, mod in model.named_modules():
if not hasattr(_snake_case , '''weight''' ):
continue
UpperCamelCase__ : List[Any] = max(_snake_case , len(_snake_case ) )
for name, mod in model.named_modules():
UpperCamelCase__ : Any = getattr(_snake_case , '''_input_quantizer''' , _snake_case )
UpperCamelCase__ : Union[str, Any] = getattr(_snake_case , '''_weight_quantizer''' , _snake_case )
if not hasattr(_snake_case , '''weight''' ):
continue
if type(_snake_case ) in ignore:
continue
if [True for s in ignore if type(_snake_case ) is str and s in name]:
continue
UpperCamelCase__ : List[Any] = F"Act:{input_q.extra_repr()}"
UpperCamelCase__ : Any = F"Wgt:{weight_q.extra_repr()}"
UpperCamelCase__ : Dict = F"{name:{name_width}} {act_str} {wgt_str}"
if len(_snake_case ) <= line_width:
logger.info(_snake_case )
else:
logger.info(F"{name:{name_width}} {act_str}" )
logger.info(F"{' ':{name_width}} {wgt_str}" )
def _a ( SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
UpperCamelCase__ : List[Any] = 0
for name, mod in model.named_modules():
if isinstance(_snake_case , pytorch_quantization.nn.TensorQuantizer ):
print(F"{name:80} {mod}" )
count += 1
print(F"{count} TensorQuantizers found in model" )
def _a ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = getattr(_snake_case , _snake_case , _snake_case )
if quantizer_mod is not None:
assert hasattr(_snake_case , _snake_case )
setattr(_snake_case , _snake_case , _snake_case )
else:
logger.warning(F"{name} has no {quantizer}" )
def _a ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : str="both" , **SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
UpperCamelCase__ : str = F"Warning: changing {which} quantizers of {name:{qname_width}}"
for k, v in kwargs.items():
s += F" {k}={v}"
if which in ["input", "both"]:
set_quantizer(_snake_case , _snake_case , '''_input_quantizer''' , _snake_case , _snake_case )
if which in ["weight", "both"]:
set_quantizer(_snake_case , _snake_case , '''_weight_quantizer''' , _snake_case , _snake_case )
logger.info(_snake_case )
def _a ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
for name, mod in model.named_modules():
if hasattr(_snake_case , '''_input_quantizer''' ) or hasattr(_snake_case , '''_weight_quantizer''' ):
for n in names:
if re.search(_snake_case , _snake_case ):
set_quantizers(_snake_case , _snake_case , **_snake_case )
elif name.endswith('''_quantizer''' ):
for n in names:
if re.search(_snake_case , _snake_case ):
UpperCamelCase__ : List[str] = F"Warning: changing {name:{name_width}}"
for k, v in kwargs.items():
s += F" {k}={v}"
setattr(_snake_case , _snake_case , _snake_case )
logger.info(_snake_case )
| 146 |
"""simple docstring"""
def different_signs( numa : int , numb : int ) -> bool:
    '''simple docstring'''
    return numa ^ numb < 0
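# Two's-complement trick: the XOR of two integers is negative exactly when their
# sign bits differ, e.g. different_signs(1, -1) is True and different_signs(1, 1) is False.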
if __name__ == "__main__":
import doctest
doctest.testmod()
| 315 | 0 |