| code (string, lengths 81–54k) | code_codestyle (int64, 0–721) | style_context (string, lengths 91–41.9k) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
import operator


def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution


if __name__ == "__main__":
    assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
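
# Usage sketch: `strand_sort` consumes its input list in place via `arr.pop`,
# so pass a copy when the caller still needs the original ordering.
if __name__ == "__main__":
    data = [4, 3, 5, 1, 2]
    print(strand_sort(list(data)))  # [1, 2, 3, 4, 5]
    print(data)  # unchanged, because a copy was sorted: [4, 3, 5, 1, 2]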
| code_codestyle: 670 |
class Node:
    def __init__(self, data: int, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value):
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None


def create_linked_list() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
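
# Usage sketch for the doubly linked list as reconstructed above:
if __name__ == "__main__":
    linked_list = LinkedList()
    for value in (1, 2, 3):
        linked_list.insert(value)
    print(linked_list)        # 1 2 3
    print(2 in linked_list)   # True
    linked_list.delete_value(2)
    print(list(linked_list))  # [1, 3], via LinkedListIterator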
| style_context_codestyle: 670 | label: 1 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| code_codestyle: 670 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]

if TYPE_CHECKING:
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
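
# This file registers names in `_import_structure` and defers the real imports
# until first attribute access. A minimal sketch of the same idea using a PEP 562
# module-level __getattr__ (a simplification, not the actual _LazyModule code):
import importlib

_demo_structure = {"json": ["dumps", "loads"]}  # hypothetical mapping: module -> public names
_name_to_module = {name: mod for mod, names in _demo_structure.items() for name in names}


def __getattr__(name):
    # The submodule is imported only when the attribute is first touched.
    if name in _name_to_module:
        return getattr(importlib.import_module(_name_to_module[name]), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")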
| style_context_codestyle: 670 | label: 1 |
import unittest

import numpy as np
import torch

from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
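
# The same pipeline outside the test harness; a sketch that assumes `diffusers`
# is installed and the pretrained weights are reachable over the network:
if __name__ == "__main__":
    import torch
    from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel

    unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
    pipe = PNDMPipeline(unet=unet, scheduler=PNDMScheduler())
    image = pipe(generator=torch.manual_seed(0), num_inference_steps=20, output_type="numpy").images[0]
    print(image.shape)  # (32, 32, 3)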
| code_codestyle: 670 |
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel

from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance


@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| style_context_codestyle: 670 | label: 1 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPOnnxConfig",
        "CLIPTextConfig",
        "CLIPVisionConfig",
    ],
    "processing_clip": ["CLIPProcessor"],
    "tokenization_clip": ["CLIPTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clip"] = [
        "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPModel",
        "CLIPPreTrainedModel",
        "CLIPTextModel",
        "CLIPTextModelWithProjection",
        "CLIPVisionModel",
        "CLIPVisionModelWithProjection",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_clip"] = [
        "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCLIPModel",
        "TFCLIPPreTrainedModel",
        "TFCLIPTextModel",
        "TFCLIPVisionModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_clip"] = [
        "FlaxCLIPModel",
        "FlaxCLIPPreTrainedModel",
        "FlaxCLIPTextModel",
        "FlaxCLIPTextPreTrainedModel",
        "FlaxCLIPVisionModel",
        "FlaxCLIPVisionPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_clip import (
        CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPConfig,
        CLIPOnnxConfig,
        CLIPTextConfig,
        CLIPVisionConfig,
    )
    from .processing_clip import CLIPProcessor
    from .tokenization_clip import CLIPTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_clip_fast import CLIPTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clip import CLIPFeatureExtractor
        from .image_processing_clip import CLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clip import (
            CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPModel,
            CLIPPreTrainedModel,
            CLIPTextModel,
            CLIPTextModelWithProjection,
            CLIPVisionModel,
            CLIPVisionModelWithProjection,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_clip import (
            TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCLIPModel,
            TFCLIPPreTrainedModel,
            TFCLIPTextModel,
            TFCLIPVisionModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_clip import (
            FlaxCLIPModel,
            FlaxCLIPPreTrainedModel,
            FlaxCLIPTextModel,
            FlaxCLIPTextPreTrainedModel,
            FlaxCLIPVisionModel,
            FlaxCLIPVisionPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
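
# Once the lazy module resolves, the public CLIP classes behave like eager
# imports. A short zero-shot similarity sketch; assumes network access, a local
# image file, and the standard public checkpoint id:
if __name__ == "__main__":
    import torch
    from PIL import Image
    from transformers import CLIPModel, CLIPProcessor

    model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

    image = Image.open("cat.png")  # any local image
    inputs = processor(
        text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
    )
    with torch.no_grad():
        probs = model(**inputs).logits_per_image.softmax(dim=1)
    print(probs)  # image-text similarity scores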
| code_codestyle: 670 |
def solution(length: int = 50) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )

    return sum(different_colour_ways_number[length])


if __name__ == "__main__":
    print(f"{solution() = }")
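
# An independent cross-check, assuming this DP counts the classic tiling
# problem (coloured tiles of lengths 2, 3 and 4 replacing black unit squares,
# at least one coloured tile per row): per tile length m, the number of rows of
# size n built from unit squares and m-long tiles satisfies t(n) = t(n-1) + t(n-m).
def _ways_one_colour(length: int, tile: int) -> int:
    t = [0] * (length + 1)
    for n in range(length + 1):
        t[n] = 1 if n < tile else t[n - 1] + t[n - tile]
    return t[length] - 1  # drop the all-black row


if __name__ == "__main__":
    for n in range(1, 20):
        assert solution(n) == sum(_ways_one_colour(n, tile) for tile in (2, 3, 4))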
| style_context_codestyle: 670 | label: 1 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}


class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with Jieba segmentation. Used in CPM models."""

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")

    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
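
# Usage sketch; assumes `sentencepiece` and `jieba` are installed and the
# checkpoint from the vocab map above is reachable:
if __name__ == "__main__":
    tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
    ids = tokenizer("清华大学创建于1911年。")["input_ids"]
    print(tokenizer.decode(ids))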
| code_codestyle: 670 |
import argparse

from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
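
# Programmatic invocation, equivalent to the CLI above; the paths below are
# placeholders and must point at a real TF checkpoint and config:
# convert_tf_checkpoint_to_pytorch(
#     tf_checkpoint_path="/ckpts/t5/model.ckpt",
#     config_file="/ckpts/t5/config.json",
#     pytorch_dump_path="/out/t5-pytorch",
# )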
| style_context_codestyle: 670 | label: 1 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import torch
from torch import nn

from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging


logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    """Wrapper that runs several `ControlNetModel`s and sums their residuals."""

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ):
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
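
# Usage sketch: the Stable Diffusion ControlNet pipeline wraps a list of
# controlnets in MultiControlNetModel. The checkpoint ids are illustrative
# public ones and assume network access:
# from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
# controlnets = [
#     ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny"),
#     ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose"),
# ]
# pipe = StableDiffusionControlNetPipeline.from_pretrained(
#     "runwayml/stable-diffusion-v1-5", controlnet=controlnets
# )
# # one conditioning image and one scale per net:
# # pipe(prompt, image=[canny_image, pose_image], controlnet_conditioning_scale=[1.0, 0.8])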
| code_codestyle: 670 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roformer"] = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
            ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoFormerForCausalLM,
            RoFormerForMaskedLM,
            RoFormerForMultipleChoice,
            RoFormerForQuestionAnswering,
            RoFormerForSequenceClassification,
            RoFormerForTokenClassification,
            RoFormerLayer,
            RoFormerModel,
            RoFormerPreTrainedModel,
            load_tf_weights_in_roformer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
            TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerLayer,
            TFRoFormerModel,
            TFRoFormerPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
            FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerModel,
            FlaxRoFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| style_context_codestyle: 670 | label: 1 |
import unittest

import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin


enable_full_determinism()


class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
| code_codestyle: 670 |
from sympy import diff, lambdify, symbols
from sympy.functions import *  # noqa: F403


def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Finds root of `function` from `starting_point` via the Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")

    # Find root of polynomial
    # Find fourth Root of 5
    print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 + 5j)}")

    # Find value of e
    print(
        "The root of log(y) - 1 = 0 is ",
        f"{newton_raphson('log(y) - 1', 2, variable='y')}",
    )

    # Exponential Roots
    print(
        "The root of exp(x) - 1 = 0 is",
        f"{newton_raphson('exp(x) - 1', 10, precision=0.005)}",
    )

    # Find root of cos(x)
    print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
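
# For reference, the update implemented above is the multiplicity-adjusted
# Newton step, which keeps quadratic convergence at a root of multiplicity m:
#
#     x_{n+1} = x_n - m * f(x_n) / f'(x_n)
#
# With m = 1 this reduces to the standard Newton-Raphson iteration.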
| style_context_codestyle: 670 | label: 1 |
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    BertTokenizer,
    ViltConfig,
    ViltForImageAndTextRetrieval,
    ViltForImagesAndTextClassification,
    ViltForMaskedLM,
    ViltForQuestionAnswering,
    ViltImageProcessor,
    ViltProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"transformer.blocks.{i}.norm1.weight", f"vilt.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"transformer.blocks.{i}.norm1.bias", f"vilt.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"transformer.blocks.{i}.attn.proj.weight", f"vilt.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"transformer.blocks.{i}.attn.proj.bias", f"vilt.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"transformer.blocks.{i}.norm2.weight", f"vilt.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"transformer.blocks.{i}.norm2.bias", f"vilt.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append(
            (f"transformer.blocks.{i}.mlp.fc1.weight", f"vilt.encoder.layer.{i}.intermediate.dense.weight")
        )
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc1.bias", f"vilt.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.weight", f"vilt.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.bias", f"vilt.encoder.layer.{i}.output.dense.bias"))

    # embeddings
    rename_keys.extend(
        [
            # text embeddings
            ("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
            (
                "text_embeddings.position_embeddings.weight",
                "vilt.embeddings.text_embeddings.position_embeddings.weight",
            ),
            ("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
            (
                "text_embeddings.token_type_embeddings.weight",
                "vilt.embeddings.text_embeddings.token_type_embeddings.weight",
            ),
            ("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
            ("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
            # patch embeddings
            ("transformer.cls_token", "vilt.embeddings.cls_token"),
            ("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
            ("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
            ("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
            # token type embeddings
            ("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
        ]
    )

    # final layernorm + pooler
    rename_keys.extend(
        [
            ("transformer.norm.weight", "vilt.layernorm.weight"),
            ("transformer.norm.bias", "vilt.layernorm.bias"),
            ("pooler.dense.weight", "vilt.pooler.dense.weight"),
            ("pooler.dense.bias", "vilt.pooler.dense.bias"),
        ]
    )

    # classifier head(s)
    if vqa_model:
        # classification head
        rename_keys.extend(
            [
                ("vqa_classifier.0.weight", "classifier.0.weight"),
                ("vqa_classifier.0.bias", "classifier.0.bias"),
                ("vqa_classifier.1.weight", "classifier.1.weight"),
                ("vqa_classifier.1.bias", "classifier.1.bias"),
                ("vqa_classifier.3.weight", "classifier.3.weight"),
                ("vqa_classifier.3.bias", "classifier.3.bias"),
            ]
        )
    elif nlvr_model:
        # classification head
        rename_keys.extend(
            [
                ("nlvr2_classifier.0.weight", "classifier.0.weight"),
                ("nlvr2_classifier.0.bias", "classifier.0.bias"),
                ("nlvr2_classifier.1.weight", "classifier.1.weight"),
                ("nlvr2_classifier.1.bias", "classifier.1.bias"),
                ("nlvr2_classifier.3.weight", "classifier.3.weight"),
                ("nlvr2_classifier.3.bias", "classifier.3.bias"),
            ]
        )
    else:
        pass

    return rename_keys


def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)

    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
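
# After conversion (or with the already-converted hub checkpoint
# "dandelin/vilt-b32-finetuned-vqa"), the model answers visual questions.
# A short sketch, assuming network access:
# processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
# model = ViltForQuestionAnswering.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
# image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
# encoding = processor(image, "How many cats are there?", return_tensors="pt")
# logits = model(**encoding).logits
# print(model.config.id2label[logits.argmax(-1).item()])  # expected: "2"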
| code_codestyle: 670 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| style_context_codestyle: 670 | label: 1 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_efficientformer": [
        "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientFormerConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_efficientformer"] = [
        "EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientFormerForImageClassification",
        "EfficientFormerForImageClassificationWithTeacher",
        "EfficientFormerModel",
        "EfficientFormerPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
        "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFEfficientFormerForImageClassification",
        "TFEfficientFormerForImageClassificationWithTeacher",
        "TFEfficientFormerModel",
        "TFEfficientFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientformer import EfficientFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientformer import (
            EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientFormerForImageClassification,
            EfficientFormerForImageClassificationWithTeacher,
            EfficientFormerModel,
            EfficientFormerPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_efficientformer import (
            TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFEfficientFormerForImageClassification,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerModel,
            TFEfficientFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| code_codestyle: 670 |
from manim import *
class _UpperCamelCase ( _A ):
'''simple docstring'''
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: Dict = Rectangle(height=0.5 , width=0.5 )
UpperCamelCase_: Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCamelCase_: Tuple = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Tuple = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Union[str, Any] = VGroup(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Optional[Any] = Text("""CPU""" , font_size=24 )
UpperCamelCase_: int = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(snake_case_ )
UpperCamelCase_: Optional[int] = [mem.copy() for i in range(1 )]
UpperCamelCase_: Dict = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Optional[int] = Text("""GPU""" , font_size=24 )
UpperCamelCase_: Optional[int] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
gpu.align_to(snake_case_ , snake_case_ )
gpu.set_x(gpu.get_x() - 1 )
self.add(snake_case_ )
UpperCamelCase_: Dict = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Any = Text("""Model""" , font_size=24 )
UpperCamelCase_: Optional[Any] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
model.move_to([3, -1.0, 0] )
self.play(
Create(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) , )
        step_a = MarkupText(
            f'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' , font_size=24 , )
        key = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        key_text = MarkupText(
            f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        step_a.move_to([2, 2, 0] )
        self.play(Write(step_a , run_time=2.5 ) , Write(key_text ) , Write(key ) )
        self.add(key_text )
UpperCamelCase_: Union[str, Any] = []
UpperCamelCase_: Union[str, Any] = []
UpperCamelCase_: Tuple = []
for i, rect in enumerate(snake_case_ ):
            cpu_target = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(YELLOW , opacity=0.7 )
            cpu_target.move_to(rect )
cpu_target.generate_target()
UpperCamelCase_: int = 0.46 / 4
UpperCamelCase_: Optional[int] = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=snake_case_ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=snake_case_ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=snake_case_ , buff=0.0 )
cpu_targs.append(snake_case_ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(snake_case_ ) )
second_animations.append(MoveToTarget(snake_case_ , run_time=1.5 ) )
self.play(*snake_case_ )
self.play(*snake_case_ )
self.wait()
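# Hypothetical render command (standard manim CLI, not taken from this file;
# assumes the scene above is saved as stage.py):
#
#   manim -pql stage.py _UpperCamelCase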
| 670 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
lowerCamelCase_ : List[Any] = {"""tokenization_herbert""": ["""HerbertTokenizer"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Union[str, Any] = ["""HerbertTokenizerFast"""]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
lowerCamelCase_ : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 670 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Union[str, Any] = """laion/clap-htsat-unfused"""
UpperCamelCase_: List[str] = tempfile.mkdtemp()
def lowerCAmelCase__ ( self : Tuple , **snake_case_ : Optional[Any] ):
return RobertaTokenizer.from_pretrained(self.checkpoint , **snake_case_ )
def lowerCAmelCase__ ( self : str , **snake_case_ : Any ):
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **snake_case_ )
def lowerCAmelCase__ ( self : Tuple ):
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: Union[str, Any] = self.get_tokenizer()
UpperCamelCase_: int = self.get_feature_extractor()
UpperCamelCase_: List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_: Any = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: Any = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_: Optional[int] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
UpperCamelCase_: Dict = self.get_feature_extractor(do_normalize=snake_case_ , padding_value=1.0 )
UpperCamelCase_: List[str] = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=snake_case_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: int = self.get_feature_extractor()
UpperCamelCase_: Optional[Any] = self.get_tokenizer()
UpperCamelCase_: Dict = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
UpperCamelCase_: Optional[Any] = floats_list((3, 1000) )
UpperCamelCase_: List[str] = feature_extractor(snake_case_ , return_tensors="""np""" )
UpperCamelCase_: int = processor(audios=snake_case_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: List[Any] = self.get_feature_extractor()
UpperCamelCase_: List[str] = self.get_tokenizer()
UpperCamelCase_: List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
UpperCamelCase_: Dict = """This is a test string"""
UpperCamelCase_: Tuple = processor(text=snake_case_ )
UpperCamelCase_: Optional[int] = tokenizer(snake_case_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: List[str] = self.get_feature_extractor()
UpperCamelCase_: Any = self.get_tokenizer()
UpperCamelCase_: Optional[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
UpperCamelCase_: str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase_: Tuple = processor.batch_decode(snake_case_ )
UpperCamelCase_: str = tokenizer.batch_decode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Any = self.get_feature_extractor()
UpperCamelCase_: str = self.get_tokenizer()
UpperCamelCase_: Optional[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
| 670 | 1 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class _UpperCamelCase ( _A , _A ):
'''simple docstring'''
@register_to_config
def __init__( self : List[str] , snake_case_ : int = 128 , snake_case_ : int = 256 , snake_case_ : float = 2000.0 , snake_case_ : int = 768 , snake_case_ : int = 12 , snake_case_ : int = 12 , snake_case_ : int = 64 , snake_case_ : int = 2048 , snake_case_ : float = 0.1 , ):
super().__init__()
UpperCamelCase_: Tuple = nn.Sequential(
nn.Linear(snake_case_ , d_model * 4 , bias=snake_case_ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=snake_case_ ) , nn.SiLU() , )
UpperCamelCase_: Any = nn.Embedding(snake_case_ , snake_case_ )
UpperCamelCase_: Tuple = False
UpperCamelCase_: Optional[int] = nn.Linear(snake_case_ , snake_case_ , bias=snake_case_ )
UpperCamelCase_: Tuple = nn.Dropout(p=snake_case_ )
UpperCamelCase_: int = nn.ModuleList()
for lyr_num in range(snake_case_ ):
# FiLM conditional T5 decoder
UpperCamelCase_: str = DecoderLayer(d_model=snake_case_ , d_kv=snake_case_ , num_heads=snake_case_ , d_ff=snake_case_ , dropout_rate=snake_case_ )
self.decoders.append(snake_case_ )
UpperCamelCase_: Union[str, Any] = TaLayerNorm(snake_case_ )
UpperCamelCase_: Tuple = nn.Dropout(p=snake_case_ )
UpperCamelCase_: Any = nn.Linear(snake_case_ , snake_case_ , bias=snake_case_ )
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : Optional[Any] , snake_case_ : List[Any] ):
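        # Shape sketch (illustrative): for query_input of shape [batch, len_q] and
        # key_input of shape [batch, len_k], the elementwise product below is
        # [batch, len_q, len_k]; unsqueeze(-3) then adds a broadcastable head axis,
        # giving [batch, 1, len_q, len_k].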
UpperCamelCase_: List[str] = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def lowerCAmelCase__ ( self : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : Optional[Any] , snake_case_ : int ):
        batch, _, _ = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
UpperCamelCase_: Any = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
UpperCamelCase_: str = self.conditioning_emb(snake_case_ ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
        seq_length = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
UpperCamelCase_: Union[str, Any] = torch.broadcast_to(
torch.arange(snake_case_ , device=decoder_input_tokens.device ) , (batch, seq_length) , )
UpperCamelCase_: List[str] = self.position_encoding(snake_case_ )
UpperCamelCase_: List[Any] = self.continuous_inputs_projection(snake_case_ )
inputs += position_encodings
UpperCamelCase_: Union[str, Any] = self.dropout(snake_case_ )
# decoder: No padding present.
UpperCamelCase_: List[Any] = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
UpperCamelCase_: Tuple = [(x, self.encoder_decoder_mask(snake_case_ , snake_case_ )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
UpperCamelCase_: List[Any] = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
UpperCamelCase_: int = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
UpperCamelCase_: Union[str, Any] = lyr(
snake_case_ , conditioning_emb=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , )[0]
UpperCamelCase_: Any = self.decoder_norm(snake_case_ )
UpperCamelCase_: Union[str, Any] = self.post_dropout(snake_case_ )
UpperCamelCase_: List[Any] = self.spec_out(snake_case_ )
return spec_out
class _UpperCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , snake_case_ : Optional[int] , snake_case_ : Any , snake_case_ : Optional[int] , snake_case_ : Optional[int] , snake_case_ : Dict , snake_case_ : List[str]=1e-6 ):
super().__init__()
UpperCamelCase_: Union[str, Any] = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=snake_case_ , d_kv=snake_case_ , num_heads=snake_case_ , dropout_rate=snake_case_ ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=snake_case_ , d_kv=snake_case_ , num_heads=snake_case_ , dropout_rate=snake_case_ , layer_norm_epsilon=snake_case_ , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=snake_case_ , d_ff=snake_case_ , dropout_rate=snake_case_ , layer_norm_epsilon=snake_case_ ) )
def lowerCAmelCase__ ( self : Any , snake_case_ : Any , snake_case_ : int=None , snake_case_ : Dict=None , snake_case_ : Tuple=None , snake_case_ : str=None , snake_case_ : Union[str, Any]=None , ):
UpperCamelCase_: str = self.layer[0](
snake_case_ , conditioning_emb=snake_case_ , attention_mask=snake_case_ , )
if encoder_hidden_states is not None:
UpperCamelCase_: Optional[Any] = torch.where(encoder_attention_mask > 0 , 0 , -1e10 ).to(
encoder_hidden_states.dtype )
UpperCamelCase_: Any = self.layer[1](
snake_case_ , key_value_states=snake_case_ , attention_mask=snake_case_ , )
# Apply Film Conditional Feed Forward layer
UpperCamelCase_: Tuple = self.layer[-1](snake_case_ , snake_case_ )
return (hidden_states,)
class _UpperCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] , snake_case_ : int , snake_case_ : Optional[int] , snake_case_ : Tuple , snake_case_ : Any ):
super().__init__()
UpperCamelCase_: Optional[Any] = TaLayerNorm(snake_case_ )
UpperCamelCase_: Dict = TaFiLMLayer(in_features=d_model * 4 , out_features=snake_case_ )
UpperCamelCase_: Dict = Attention(query_dim=snake_case_ , heads=snake_case_ , dim_head=snake_case_ , out_bias=snake_case_ , scale_qk=snake_case_ )
UpperCamelCase_: int = nn.Dropout(snake_case_ )
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : Tuple , snake_case_ : Any=None , snake_case_ : List[Any]=None , ):
# pre_self_attention_layer_norm
UpperCamelCase_: Union[str, Any] = self.layer_norm(snake_case_ )
if conditioning_emb is not None:
UpperCamelCase_: List[str] = self.FiLMLayer(snake_case_ , snake_case_ )
# Self-attention block
UpperCamelCase_: Optional[int] = self.attention(snake_case_ )
UpperCamelCase_: List[str] = hidden_states + self.dropout(snake_case_ )
return hidden_states
class _UpperCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , snake_case_ : List[Any] , snake_case_ : str , snake_case_ : Union[str, Any] , snake_case_ : List[Any] , snake_case_ : Tuple ):
super().__init__()
UpperCamelCase_: Any = Attention(query_dim=snake_case_ , heads=snake_case_ , dim_head=snake_case_ , out_bias=snake_case_ , scale_qk=snake_case_ )
UpperCamelCase_: int = TaLayerNorm(snake_case_ , eps=snake_case_ )
UpperCamelCase_: List[Any] = nn.Dropout(snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : List[str] , snake_case_ : Union[str, Any]=None , snake_case_ : Any=None , ):
UpperCamelCase_: Optional[Any] = self.layer_norm(snake_case_ )
UpperCamelCase_: str = self.attention(
snake_case_ , encoder_hidden_states=snake_case_ , attention_mask=attention_mask.squeeze(1 ) , )
UpperCamelCase_: List[str] = hidden_states + self.dropout(snake_case_ )
return layer_output
class _UpperCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , snake_case_ : Any , snake_case_ : List[Any] , snake_case_ : Optional[int] , snake_case_ : List[Any] ):
super().__init__()
UpperCamelCase_: Optional[int] = TaDenseGatedActDense(d_model=snake_case_ , d_ff=snake_case_ , dropout_rate=snake_case_ )
UpperCamelCase_: Union[str, Any] = TaFiLMLayer(in_features=d_model * 4 , out_features=snake_case_ )
UpperCamelCase_: Optional[Any] = TaLayerNorm(snake_case_ , eps=snake_case_ )
UpperCamelCase_: int = nn.Dropout(snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : int , snake_case_ : Any=None ):
UpperCamelCase_: Dict = self.layer_norm(snake_case_ )
if conditioning_emb is not None:
UpperCamelCase_: List[Any] = self.film(snake_case_ , snake_case_ )
UpperCamelCase_: Union[str, Any] = self.DenseReluDense(snake_case_ )
UpperCamelCase_: Optional[int] = hidden_states + self.dropout(snake_case_ )
return hidden_states
class _UpperCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , snake_case_ : Optional[Any] , snake_case_ : Tuple , snake_case_ : Optional[int] ):
super().__init__()
UpperCamelCase_: List[Any] = nn.Linear(snake_case_ , snake_case_ , bias=snake_case_ )
UpperCamelCase_: Tuple = nn.Linear(snake_case_ , snake_case_ , bias=snake_case_ )
UpperCamelCase_: Tuple = nn.Linear(snake_case_ , snake_case_ , bias=snake_case_ )
UpperCamelCase_: Optional[Any] = nn.Dropout(snake_case_ )
UpperCamelCase_: int = NewGELUActivation()
def lowerCAmelCase__ ( self : Optional[int] , snake_case_ : List[Any] ):
        UpperCamelCase_: int = self.act(self.wi_a(snake_case_ ) )  # gated branch, through the activation
        UpperCamelCase_: Union[str, Any] = self.wi_b(snake_case_ )  # linear branch; a distinct projection from the gated one
UpperCamelCase_: Dict = hidden_gelu * hidden_linear
UpperCamelCase_: Any = self.dropout(snake_case_ )
UpperCamelCase_: Union[str, Any] = self.wo(snake_case_ )
return hidden_states
class _UpperCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , snake_case_ : Optional[int] , snake_case_ : Optional[Any]=1e-6 ):
super().__init__()
UpperCamelCase_: List[Any] = nn.Parameter(torch.ones(snake_case_ ) )
UpperCamelCase_: List[Any] = eps
def lowerCAmelCase__ ( self : Tuple , snake_case_ : int ):
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
UpperCamelCase_: Tuple = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=snake_case_ )
UpperCamelCase_: Optional[int] = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
UpperCamelCase_: Dict = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
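# Numeric sketch of the RMS-style norm above (illustrative, eps omitted):
#   h = [3.0, 4.0] -> mean(h**2) = 12.5 -> h * rsqrt(12.5) ≈ [0.8485, 1.1314],
# i.e. the output has unit root-mean-square before the learned `weight` scaling.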
class _UpperCamelCase ( nn.Module ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Union[str, Any] , snake_case_ : torch.Tensor ):
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_4715 * torch.pow(snake_case_ , 3.0 )) ))
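# The return above is the tanh approximation of GELU:
#   gelu(x) ≈ 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x**3)))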
class _UpperCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , snake_case_ : Dict , snake_case_ : int ):
super().__init__()
UpperCamelCase_: int = nn.Linear(snake_case_ , out_features * 2 , bias=snake_case_ )
    def lowerCAmelCase__ ( self , x , conditioning_emb ):
        scale_bias = self.scale_bias(conditioning_emb )
        scale, shift = torch.chunk(scale_bias , 2 , -1 )
        x = x * (1 + scale) + shift
return x
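# FiLM recap (illustrative): the conditioning embedding is projected to per-feature
# (scale, shift) pairs and applied as x * (1 + scale) + shift; assuming the
# projection is bias-free, a zero conditioning signal leaves x unchanged.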
| 670 |
import warnings
from ..trainer import Trainer
from ..utils import logging
lowerCamelCase_ : Dict = logging.get_logger(__name__)
class _UpperCamelCase ( _A ):
'''simple docstring'''
def __init__( self : List[str] , snake_case_ : Tuple=None , **snake_case_ : List[str] ):
warnings.warn(
"""`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
"""instead.""" , snake_case_ , )
super().__init__(args=snake_case_ , **snake_case_ )
| 670 | 1 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
lowerCamelCase_ : List[Any] = logging.get_logger(__name__)
lowerCamelCase_ : str = OrderedDict(
[
# Base model mapping
("""albert""", """FlaxAlbertModel"""),
("""bart""", """FlaxBartModel"""),
("""beit""", """FlaxBeitModel"""),
("""bert""", """FlaxBertModel"""),
("""big_bird""", """FlaxBigBirdModel"""),
("""blenderbot""", """FlaxBlenderbotModel"""),
("""blenderbot-small""", """FlaxBlenderbotSmallModel"""),
("""clip""", """FlaxCLIPModel"""),
("""distilbert""", """FlaxDistilBertModel"""),
("""electra""", """FlaxElectraModel"""),
("""gpt-sw3""", """FlaxGPT2Model"""),
("""gpt2""", """FlaxGPT2Model"""),
("""gpt_neo""", """FlaxGPTNeoModel"""),
("""gptj""", """FlaxGPTJModel"""),
("""longt5""", """FlaxLongT5Model"""),
("""marian""", """FlaxMarianModel"""),
("""mbart""", """FlaxMBartModel"""),
("""mt5""", """FlaxMT5Model"""),
("""opt""", """FlaxOPTModel"""),
("""pegasus""", """FlaxPegasusModel"""),
("""regnet""", """FlaxRegNetModel"""),
("""resnet""", """FlaxResNetModel"""),
("""roberta""", """FlaxRobertaModel"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""),
("""roformer""", """FlaxRoFormerModel"""),
("""t5""", """FlaxT5Model"""),
("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""),
("""vit""", """FlaxViTModel"""),
("""wav2vec2""", """FlaxWav2Vec2Model"""),
("""whisper""", """FlaxWhisperModel"""),
("""xglm""", """FlaxXGLMModel"""),
("""xlm-roberta""", """FlaxXLMRobertaModel"""),
]
)
lowerCamelCase_ : Any = OrderedDict(
[
# Model for pre-training mapping
("""albert""", """FlaxAlbertForPreTraining"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForPreTraining"""),
("""big_bird""", """FlaxBigBirdForPreTraining"""),
("""electra""", """FlaxElectraForPreTraining"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
lowerCamelCase_ : Union[str, Any] = OrderedDict(
[
# Model for Masked LM mapping
("""albert""", """FlaxAlbertForMaskedLM"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForMaskedLM"""),
("""big_bird""", """FlaxBigBirdForMaskedLM"""),
("""distilbert""", """FlaxDistilBertForMaskedLM"""),
("""electra""", """FlaxElectraForMaskedLM"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
lowerCamelCase_ : List[str] = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("""bart""", """FlaxBartForConditionalGeneration"""),
("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""),
("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""),
("""encoder-decoder""", """FlaxEncoderDecoderModel"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""marian""", """FlaxMarianMTModel"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""pegasus""", """FlaxPegasusForConditionalGeneration"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
]
)
lowerCamelCase_ : Tuple = OrderedDict(
[
# Model for Image-classsification
("""beit""", """FlaxBeitForImageClassification"""),
("""regnet""", """FlaxRegNetForImageClassification"""),
("""resnet""", """FlaxResNetForImageClassification"""),
("""vit""", """FlaxViTForImageClassification"""),
]
)
lowerCamelCase_ : Optional[int] = OrderedDict(
[
("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""),
]
)
lowerCamelCase_ : Optional[int] = OrderedDict(
[
# Model for Causal LM mapping
("""bart""", """FlaxBartForCausalLM"""),
("""bert""", """FlaxBertForCausalLM"""),
("""big_bird""", """FlaxBigBirdForCausalLM"""),
("""electra""", """FlaxElectraForCausalLM"""),
("""gpt-sw3""", """FlaxGPT2LMHeadModel"""),
("""gpt2""", """FlaxGPT2LMHeadModel"""),
("""gpt_neo""", """FlaxGPTNeoForCausalLM"""),
("""gptj""", """FlaxGPTJForCausalLM"""),
("""opt""", """FlaxOPTForCausalLM"""),
("""roberta""", """FlaxRobertaForCausalLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""),
("""xglm""", """FlaxXGLMForCausalLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""),
]
)
lowerCamelCase_ : Dict = OrderedDict(
[
# Model for Sequence Classification mapping
("""albert""", """FlaxAlbertForSequenceClassification"""),
("""bart""", """FlaxBartForSequenceClassification"""),
("""bert""", """FlaxBertForSequenceClassification"""),
("""big_bird""", """FlaxBigBirdForSequenceClassification"""),
("""distilbert""", """FlaxDistilBertForSequenceClassification"""),
("""electra""", """FlaxElectraForSequenceClassification"""),
("""mbart""", """FlaxMBartForSequenceClassification"""),
("""roberta""", """FlaxRobertaForSequenceClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""),
("""roformer""", """FlaxRoFormerForSequenceClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""),
]
)
lowerCamelCase_ : Any = OrderedDict(
[
# Model for Question Answering mapping
("""albert""", """FlaxAlbertForQuestionAnswering"""),
("""bart""", """FlaxBartForQuestionAnswering"""),
("""bert""", """FlaxBertForQuestionAnswering"""),
("""big_bird""", """FlaxBigBirdForQuestionAnswering"""),
("""distilbert""", """FlaxDistilBertForQuestionAnswering"""),
("""electra""", """FlaxElectraForQuestionAnswering"""),
("""mbart""", """FlaxMBartForQuestionAnswering"""),
("""roberta""", """FlaxRobertaForQuestionAnswering"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""),
("""roformer""", """FlaxRoFormerForQuestionAnswering"""),
("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""),
]
)
lowerCamelCase_ : List[str] = OrderedDict(
[
# Model for Token Classification mapping
("""albert""", """FlaxAlbertForTokenClassification"""),
("""bert""", """FlaxBertForTokenClassification"""),
("""big_bird""", """FlaxBigBirdForTokenClassification"""),
("""distilbert""", """FlaxDistilBertForTokenClassification"""),
("""electra""", """FlaxElectraForTokenClassification"""),
("""roberta""", """FlaxRobertaForTokenClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""),
("""roformer""", """FlaxRoFormerForTokenClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""),
]
)
lowerCamelCase_ : int = OrderedDict(
[
# Model for Multiple Choice mapping
("""albert""", """FlaxAlbertForMultipleChoice"""),
("""bert""", """FlaxBertForMultipleChoice"""),
("""big_bird""", """FlaxBigBirdForMultipleChoice"""),
("""distilbert""", """FlaxDistilBertForMultipleChoice"""),
("""electra""", """FlaxElectraForMultipleChoice"""),
("""roberta""", """FlaxRobertaForMultipleChoice"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""),
("""roformer""", """FlaxRoFormerForMultipleChoice"""),
("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""),
]
)
lowerCamelCase_ : Union[str, Any] = OrderedDict(
[
("""bert""", """FlaxBertForNextSentencePrediction"""),
]
)
lowerCamelCase_ : Optional[int] = OrderedDict(
[
("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
]
)
lowerCamelCase_ : Optional[Any] = OrderedDict(
[
("""whisper""", """FlaxWhisperForAudioClassification"""),
]
)
lowerCamelCase_ : Optional[int] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
lowerCamelCase_ : Optional[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
lowerCamelCase_ : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
lowerCamelCase_ : Dict = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
lowerCamelCase_ : List[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
lowerCamelCase_ : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
lowerCamelCase_ : Optional[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
lowerCamelCase_ : Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
lowerCamelCase_ : List[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
lowerCamelCase_ : Union[str, Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
lowerCamelCase_ : Optional[int] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
lowerCamelCase_ : Dict = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
lowerCamelCase_ : Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
lowerCamelCase_ : Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class _UpperCamelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase : Tuple = FLAX_MODEL_MAPPING
lowerCamelCase_ : str = auto_class_update(FlaxAutoModel)
class _UpperCamelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase : str = FLAX_MODEL_FOR_PRETRAINING_MAPPING
lowerCamelCase_ : List[str] = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""")
class _UpperCamelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase : Any = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
lowerCamelCase_ : Dict = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""")
class _UpperCamelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase : int = FLAX_MODEL_FOR_MASKED_LM_MAPPING
lowerCamelCase_ : Dict = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""")
class _UpperCamelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase : Optional[int] = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCamelCase_ : int = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc="""sequence-to-sequence language modeling""", checkpoint_for_example="""t5-base"""
)
class _UpperCamelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase : Tuple = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowerCamelCase_ : Dict = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc="""sequence classification"""
)
class _UpperCamelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase : Optional[int] = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
lowerCamelCase_ : Any = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""")
class _UpperCamelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase : Dict = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
lowerCamelCase_ : List[str] = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc="""token classification"""
)
class _UpperCamelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase : Optional[int] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
lowerCamelCase_ : Tuple = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""")
class _UpperCamelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase : List[str] = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
lowerCamelCase_ : Optional[int] = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction"""
)
class _UpperCamelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase : List[str] = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
lowerCamelCase_ : Dict = auto_class_update(
FlaxAutoModelForImageClassification, head_doc="""image classification"""
)
class _UpperCamelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
lowerCamelCase_ : Optional[int] = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="""vision-to-text modeling""")
class _UpperCamelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase : int = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
lowerCamelCase_ : Any = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc="""sequence-to-sequence speech-to-text modeling"""
)
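# Typical dispatch through these mappings (illustrative; requires flax):
#
#   from transformers import FlaxAutoModel
#   model = FlaxAutoModel.from_pretrained("bert-base-cased")  # resolves to FlaxBertModel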
| 670 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
lowerCamelCase_ : Optional[int] = logging.get_logger("""transformers.models.speecht5""")
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[Any]:
hf_model.apply_weight_norm()
UpperCamelCase_: Union[str, Any] = checkpoint["""input_conv.weight_g"""]
UpperCamelCase_: Optional[int] = checkpoint["""input_conv.weight_v"""]
UpperCamelCase_: List[Any] = checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
UpperCamelCase_: List[str] = checkpoint[F'''upsamples.{i}.1.weight_g''']
UpperCamelCase_: Dict = checkpoint[F'''upsamples.{i}.1.weight_v''']
UpperCamelCase_: List[str] = checkpoint[F'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
UpperCamelCase_: Tuple = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g''']
UpperCamelCase_: Any = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v''']
UpperCamelCase_: Tuple = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias''']
UpperCamelCase_: Union[str, Any] = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g''']
UpperCamelCase_: Any = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v''']
UpperCamelCase_: int = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias''']
UpperCamelCase_: int = checkpoint["""output_conv.1.weight_g"""]
UpperCamelCase_: Tuple = checkpoint["""output_conv.1.weight_v"""]
UpperCamelCase_: List[str] = checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , ) -> Optional[int]:
if config_path is not None:
UpperCamelCase_: Union[str, Any] = SpeechTaHifiGanConfig.from_pretrained(lowerCamelCase )
else:
UpperCamelCase_: str = SpeechTaHifiGanConfig()
UpperCamelCase_: Union[str, Any] = SpeechTaHifiGan(lowerCamelCase )
UpperCamelCase_: str = torch.load(lowerCamelCase )
load_weights(orig_checkpoint["""model"""]["""generator"""] , lowerCamelCase , lowerCamelCase )
UpperCamelCase_: Union[str, Any] = np.load(lowerCamelCase )
UpperCamelCase_: int = stats[0].reshape(-1 )
UpperCamelCase_: Union[str, Any] = stats[1].reshape(-1 )
UpperCamelCase_: Dict = torch.from_numpy(lowerCamelCase ).float()
UpperCamelCase_: Optional[Any] = torch.from_numpy(lowerCamelCase ).float()
model.save_pretrained(lowerCamelCase )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(lowerCamelCase )
if __name__ == "__main__":
lowerCamelCase_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
lowerCamelCase_ : Optional[int] = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
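# Example invocation (script name and all paths are placeholders):
#
#   python convert_hifigan_checkpoint.py \
#       --checkpoint_path generator.ckpt \
#       --stats_path stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan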
| 670 | 1 |
import random
def _partition(data, pivot) -> tuple:
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items, index):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]

    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
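

if __name__ == "__main__":
    # Illustrative check (not part of the original module): selecting index
    # len(items) // 2 returns the median of an odd-length list.
    sample = [7, 2, 9, 4, 1, 8, 3]
    assert quick_select(sample, len(sample) // 2) == sorted(sample)[len(sample) // 2]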
| 670 |
lowerCamelCase_ : Optional[Any] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
lowerCamelCase_ : Union[str, Any] = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCamelCase_ : Optional[Any] = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 670 | 1 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> int:
# Load configuration defined in the metadata file
with open(lowerCamelCase ) as metadata_file:
UpperCamelCase_: Dict = json.load(lowerCamelCase )
UpperCamelCase_: Optional[int] = LukeConfig(use_entity_aware_attention=lowerCamelCase , **metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
UpperCamelCase_: Optional[int] = torch.load(lowerCamelCase , map_location="""cpu""" )["""module"""]
# Load the entity vocab file
UpperCamelCase_: str = load_original_entity_vocab(lowerCamelCase )
# add an entry for [MASK2]
UpperCamelCase_: Tuple = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
UpperCamelCase_: str = XLMRobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
UpperCamelCase_: Any = AddedToken("""<ent>""" , lstrip=lowerCamelCase , rstrip=lowerCamelCase )
UpperCamelCase_: Optional[Any] = AddedToken("""<ent2>""" , lstrip=lowerCamelCase , rstrip=lowerCamelCase )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(lowerCamelCase )
with open(os.path.join(lowerCamelCase , """tokenizer_config.json""" ) , """r""" ) as f:
UpperCamelCase_: Optional[Any] = json.load(lowerCamelCase )
UpperCamelCase_: Optional[Any] = """MLukeTokenizer"""
with open(os.path.join(lowerCamelCase , """tokenizer_config.json""" ) , """w""" ) as f:
json.dump(lowerCamelCase , lowerCamelCase )
with open(os.path.join(lowerCamelCase , MLukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) , """w""" ) as f:
json.dump(lowerCamelCase , lowerCamelCase )
UpperCamelCase_: Optional[int] = MLukeTokenizer.from_pretrained(lowerCamelCase )
# Initialize the embeddings of the special tokens
UpperCamelCase_: Optional[Any] = tokenizer.convert_tokens_to_ids(["""@"""] )[0]
UpperCamelCase_: Tuple = tokenizer.convert_tokens_to_ids(["""#"""] )[0]
UpperCamelCase_: List[str] = state_dict["""embeddings.word_embeddings.weight"""]
UpperCamelCase_: int = word_emb[ent_init_index].unsqueeze(0 )
UpperCamelCase_: Optional[Any] = word_emb[enta_init_index].unsqueeze(0 )
UpperCamelCase_: Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
UpperCamelCase_: List[str] = state_dict[bias_name]
UpperCamelCase_: List[str] = decoder_bias[ent_init_index].unsqueeze(0 )
UpperCamelCase_: Any = decoder_bias[enta_init_index].unsqueeze(0 )
UpperCamelCase_: Dict = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
UpperCamelCase_: Any = F'''encoder.layer.{layer_index}.attention.self.'''
UpperCamelCase_: Union[str, Any] = state_dict[prefix + matrix_name]
UpperCamelCase_: int = state_dict[prefix + matrix_name]
UpperCamelCase_: Optional[Any] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
UpperCamelCase_: List[str] = state_dict["""entity_embeddings.entity_embeddings.weight"""]
UpperCamelCase_: List[Any] = entity_emb[entity_vocab["""[MASK]"""]].unsqueeze(0 )
UpperCamelCase_: List[Any] = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
UpperCamelCase_: str = state_dict["""entity_predictions.bias"""]
UpperCamelCase_: Optional[Any] = entity_prediction_bias[entity_vocab["""[MASK]"""]].unsqueeze(0 )
UpperCamelCase_: List[str] = torch.cat([entity_prediction_bias, entity_mask_bias] )
UpperCamelCase_: List[str] = LukeForMaskedLM(config=lowerCamelCase ).eval()
state_dict.pop("""entity_predictions.decoder.weight""" )
state_dict.pop("""lm_head.decoder.weight""" )
state_dict.pop("""lm_head.decoder.bias""" )
UpperCamelCase_: Any = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("""lm_head""" ) or key.startswith("""entity_predictions""" )):
UpperCamelCase_: Dict = state_dict[key]
else:
UpperCamelCase_: Any = state_dict[key]
UpperCamelCase_, UpperCamelCase_: Any = model.load_state_dict(lowerCamelCase , strict=lowerCamelCase )
if set(lowerCamelCase ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
if set(lowerCamelCase ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
UpperCamelCase_: int = MLukeTokenizer.from_pretrained(lowerCamelCase , task="""entity_classification""" )
UpperCamelCase_: Optional[int] = """ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."""
UpperCamelCase_: Any = (0, 9)
UpperCamelCase_: Optional[int] = tokenizer(lowerCamelCase , entity_spans=[span] , return_tensors="""pt""" )
UpperCamelCase_: Any = model(**lowerCamelCase )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
UpperCamelCase_: Optional[Any] = torch.Size((1, 33, 7_68) )
UpperCamelCase_: List[str] = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
UpperCamelCase_: Any = torch.Size((1, 1, 7_68) )
UpperCamelCase_: List[Any] = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowerCamelCase , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
UpperCamelCase_: List[str] = MLukeTokenizer.from_pretrained(lowerCamelCase )
UpperCamelCase_: List[Any] = """Tokyo is the capital of <mask>."""
UpperCamelCase_: Union[str, Any] = (24, 30)
UpperCamelCase_: str = tokenizer(lowerCamelCase , entity_spans=[span] , return_tensors="""pt""" )
UpperCamelCase_: Dict = model(**lowerCamelCase )
UpperCamelCase_: str = encoding["""input_ids"""][0].tolist()
UpperCamelCase_: str = input_ids.index(tokenizer.convert_tokens_to_ids("""<mask>""" ) )
UpperCamelCase_: Any = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowerCamelCase )
UpperCamelCase_: Optional[int] = outputs.entity_logits[0][0].argmax().item()
UpperCamelCase_: Optional[Any] = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("""en:""" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(lowerCamelCase ) )
model.save_pretrained(lowerCamelCase )
def A__ ( lowerCamelCase ) -> str:
UpperCamelCase_: Optional[int] = ["""[MASK]""", """[PAD]""", """[UNK]"""]
UpperCamelCase_: Optional[Any] = [json.loads(lowerCamelCase ) for line in open(lowerCamelCase )]
UpperCamelCase_: Union[str, Any] = {}
for entry in data:
UpperCamelCase_: Any = entry["""id"""]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
UpperCamelCase_: List[str] = entity_id
break
UpperCamelCase_: Union[str, Any] = F'''{language}:{entity_name}'''
UpperCamelCase_: Dict = entity_id
return new_mapping
if __name__ == "__main__":
lowerCamelCase_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
lowerCamelCase_ : int = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
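# Example invocation (script name and all paths are hypothetical):
#
#   python convert_mluke_checkpoint.py \
#       --checkpoint_path mluke.bin --metadata_path metadata.json \
#       --entity_vocab_path entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./mluke-base --model_size base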
| 670 |
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        # k is an empirically determined constant, usually in [0.04, 0.06];
        # window_size is the size of the neighbourhood considered per pixel
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str):
        # returns the image with corners highlighted and the list of corners found
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04  # local constant; shadows the k passed to the constructor
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                # Harris response: R = det(M) - k * trace(M)**2
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
| 670 | 1 |
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
lowerCamelCase_ : Optional[int] = HUGGINGFACE_HUB_CACHE
lowerCamelCase_ : List[str] = """config.json"""
lowerCamelCase_ : Any = """diffusion_pytorch_model.bin"""
lowerCamelCase_ : Union[str, Any] = """diffusion_flax_model.msgpack"""
lowerCamelCase_ : Dict = """model.onnx"""
lowerCamelCase_ : List[Any] = """diffusion_pytorch_model.safetensors"""
lowerCamelCase_ : Optional[Any] = """weights.pb"""
lowerCamelCase_ : Optional[Any] = """https://huggingface.co"""
lowerCamelCase_ : Union[str, Any] = default_cache_path
lowerCamelCase_ : Tuple = """diffusers_modules"""
lowerCamelCase_ : Optional[Any] = os.getenv("""HF_MODULES_CACHE""", os.path.join(hf_cache_home, """modules"""))
lowerCamelCase_ : str = ["""fp16""", """non-ema"""]
lowerCamelCase_ : List[Any] = """.self_attn"""
| 670 |
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    graph: dict = {i: [] for i in range(vertices_number)}

    # if probability is greater than or equal to 1, generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is less than or equal to 0, return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from i to j
    # if the randomly generated number is lower than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i as well
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
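

# Quick illustration (not part of the original module): with probability >= 1 the
# generator falls through to `complete_graph`, where every vertex lists all others:
#
#   >>> complete_graph(3)
#   {0: [1, 2], 1: [0, 2], 2: [0, 1]}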
if __name__ == "__main__":
import doctest
doctest.testmod()
| 670 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : List[Any] ):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}

        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(merges ) )

        image_processor_map = {
            """do_resize""": True,
            """size""": 20,
            """do_center_crop""": True,
            """crop_size""": 18,
            """do_normalize""": True,
            """image_mean""": [0.4814_5466, 0.457_8275, 0.4082_1073],
            """image_std""": [0.2686_2954, 0.2613_0258, 0.2757_7711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
            json.dump(image_processor_map , fp )
def lowerCAmelCase__ ( self : str , **snake_case_ : Tuple ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def lowerCAmelCase__ ( self : List[str] , **snake_case_ : Tuple ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **snake_case_ )
def lowerCAmelCase__ ( self : List[str] , **snake_case_ : Optional[int] ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **snake_case_ )
def lowerCAmelCase__ ( self : Optional[int] ):
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: Any = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
UpperCamelCase_: List[Any] = [Image.fromarray(np.moveaxis(snake_case_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: Tuple = self.get_tokenizer()
UpperCamelCase_: int = self.get_rust_tokenizer()
UpperCamelCase_: List[Any] = self.get_image_processor()
UpperCamelCase_: int = CLIPSegProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
processor_slow.save_pretrained(self.tmpdirname )
UpperCamelCase_: List[str] = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=snake_case_ )
UpperCamelCase_: Optional[int] = CLIPSegProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
processor_fast.save_pretrained(self.tmpdirname )
UpperCamelCase_: Union[str, Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , snake_case_ )
self.assertIsInstance(processor_fast.tokenizer , snake_case_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , snake_case_ )
self.assertIsInstance(processor_fast.image_processor , snake_case_ )
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: Any = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_: Optional[Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
UpperCamelCase_: Union[str, Any] = self.get_image_processor(do_normalize=snake_case_ , padding_value=1.0 )
UpperCamelCase_: str = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=snake_case_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case_ )
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: Any = self.get_image_processor()
UpperCamelCase_: Any = self.get_tokenizer()
UpperCamelCase_: Optional[Any] = CLIPSegProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
UpperCamelCase_: Optional[Any] = self.prepare_image_inputs()
UpperCamelCase_: Union[str, Any] = image_processor(snake_case_ , return_tensors="""np""" )
UpperCamelCase_: List[str] = processor(images=snake_case_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: str = self.get_image_processor()
UpperCamelCase_: Optional[int] = self.get_tokenizer()
UpperCamelCase_: str = CLIPSegProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
UpperCamelCase_: str = """lower newer"""
UpperCamelCase_: int = processor(text=snake_case_ )
UpperCamelCase_: List[str] = tokenizer(snake_case_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: str = self.get_image_processor()
UpperCamelCase_: Optional[Any] = self.get_tokenizer()
UpperCamelCase_: Optional[Any] = CLIPSegProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
UpperCamelCase_: List[str] = """lower newer"""
UpperCamelCase_: Optional[int] = self.prepare_image_inputs()
UpperCamelCase_: Union[str, Any] = processor(text=snake_case_ , images=snake_case_ )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(snake_case_ ):
processor()
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: Any = self.get_image_processor()
UpperCamelCase_: Union[str, Any] = self.get_tokenizer()
UpperCamelCase_: Optional[int] = CLIPSegProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
UpperCamelCase_: Tuple = self.prepare_image_inputs()
UpperCamelCase_: int = self.prepare_image_inputs()
UpperCamelCase_: Any = processor(images=snake_case_ , visual_prompt=snake_case_ )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """conditional_pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(snake_case_ ):
processor()
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: List[Any] = self.get_image_processor()
UpperCamelCase_: str = self.get_tokenizer()
UpperCamelCase_: Tuple = CLIPSegProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
UpperCamelCase_: Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase_: Any = processor.batch_decode(snake_case_ )
UpperCamelCase_: Union[str, Any] = tokenizer.batch_decode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
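# Usage sketch (added, not part of the test above). The checkpoint id is the public
# CLIPSeg release and the image URL is illustrative; the returned keys match the
# assertions in the tests above:
#     from PIL import Image
#     import requests
#     from transformers import CLIPSegProcessor
#     processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#     image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
#     inputs = processor(text=["a cat"], images=[image], return_tensors="pt")
#     # inputs carries input_ids, attention_mask and pixel_values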
| 670 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: Optional[int] = logging.get_logger()
# the current default level is logging.WARNING
UpperCamelCase_: Dict = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(snake_case_ )
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: Union[str, Any] = logging.get_verbosity()
UpperCamelCase_: int = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
UpperCamelCase_: Union[str, Any] = """Testing 1, 2, 3"""
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(snake_case_ ) as cl:
logger.warning(snake_case_ )
self.assertEqual(cl.out , msg + """\n""" )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(snake_case_ ) as cl:
logger.warning(snake_case_ )
self.assertEqual(cl.out , """""" )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(snake_case_ ) as cl:
logger.warning(snake_case_ )
self.assertEqual(cl.out , msg + """\n""" )
# restore to the original level
logging.set_verbosity(snake_case_ )
@mockenv(TRANSFORMERS_VERBOSITY="""error""" )
def lowerCAmelCase__ ( self : Optional[int] ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
UpperCamelCase_: List[str] = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
UpperCamelCase_: str = os.getenv("""TRANSFORMERS_VERBOSITY""" , snake_case_ )
UpperCamelCase_: Any = logging.log_levels[env_level_str]
UpperCamelCase_: Dict = logging.get_verbosity()
self.assertEqual(
snake_case_ , snake_case_ , f'''TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}''' , )
# restore to the original level
UpperCamelCase_: str = """"""
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="""super-error""" )
def lowerCAmelCase__ ( self : List[Any] ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
UpperCamelCase_: str = logging.logging.getLogger()
with CaptureLogger(snake_case_ ) as cl:
# this action activates the env var
logging.get_logger("""transformers.models.bart.tokenization_bart""" )
self.assertIn("""Unknown option TRANSFORMERS_VERBOSITY=super-error""" , cl.out )
# no need to restore as nothing was changed
def lowerCAmelCase__ ( self : List[Any] ):
# testing `logger.warning_advice()`
transformers.utils.logging._reset_library_root_logger()
UpperCamelCase_: List[str] = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
UpperCamelCase_: Any = """Testing 1, 2, 3"""
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""1""" ):
# nothing should be logged as env var disables this method
with CaptureLogger(snake_case_ ) as cl:
logger.warning_advice(snake_case_ )
self.assertEqual(cl.out , """""" )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""""" ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(snake_case_ ) as cl:
logger.warning_advice(snake_case_ )
self.assertEqual(cl.out , msg + """\n""" )
def A__ ( ) -> Union[str, Any]:
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
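# Summary sketch (added): the public verbosity API exercised by the tests above.
# The helper name is hypothetical; the logging calls are the real transformers API.
def _verbosity_demo() -> None:
    from transformers.utils import logging as hf_logging
    hf_logging.set_verbosity_error()  # silences warnings on every transformers.* logger
    hf_logging.set_verbosity_warning()  # restores the library default
    logger = hf_logging.get_logger("transformers.models.bart.tokenization_bart")
    logger.warning("visible again")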
| 670 | 1 |
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray):
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)
    def feedforward(self) -> numpy.ndarray:
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output
    def back_propagation(self) -> None:
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer
            ),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer
                ),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )
        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )
    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"""Iteration {iteration} Loss: {loss}""")
    def predict(self, input_arr: numpy.ndarray) -> int:
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))
def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    # expects an already-activated value: d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x))
    return (value) * (1 - (value))
def example() -> int:
    input_ = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=input_, output_array=output)
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
    example()
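# Added sanity check (not in the original file; the helper name is hypothetical):
# verifies the identity sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x)) that
# sigmoid_derivative depends on, by comparing against a finite difference.
def _check_sigmoid_derivative() -> None:
    x = numpy.linspace(-4, 4, 9)
    s = sigmoid(x)
    numeric = (sigmoid(x + 1e-6) - s) / 1e-6
    assert numpy.allclose(sigmoid_derivative(s), numeric, atol=1e-4)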
| 670 |
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
lowerCamelCase_ : Optional[int] = HUGGINGFACE_HUB_CACHE
lowerCamelCase_ : List[str] = """config.json"""
lowerCamelCase_ : Any = """diffusion_pytorch_model.bin"""
lowerCamelCase_ : Union[str, Any] = """diffusion_flax_model.msgpack"""
lowerCamelCase_ : Dict = """model.onnx"""
lowerCamelCase_ : List[Any] = """diffusion_pytorch_model.safetensors"""
lowerCamelCase_ : Optional[Any] = """weights.pb"""
lowerCamelCase_ : Optional[Any] = """https://huggingface.co"""
lowerCamelCase_ : Union[str, Any] = default_cache_path
lowerCamelCase_ : Tuple = """diffusers_modules"""
lowerCamelCase_ : Optional[Any] = os.getenv("""HF_MODULES_CACHE""", os.path.join(hf_cache_home, """modules"""))
lowerCamelCase_ : str = ["""fp16""", """non-ema"""]
lowerCamelCase_ : List[Any] = """.self_attn"""
| 670 | 1 |
import numpy as np
class Cell:
    # A cell in the grid world: position, parent pointer and A* costs g, h, f.
    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0
    def __eq__(self, cell):
        return self.position == cell.position
    def showcell(self):
        print(self.position)
class Gridworld:
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]
    def show(self):
        print(self.w)
    def get_neighbours(self, cell):
        # Return the in-bounds cells adjacent to `cell` (8-connected).
        neighbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours
def astar(world, start, goal):
    _open = []
    _closed = []
    _open.append(start)
    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neighbours(current):
            # skip neighbours that were already expanded
            if any(c == n for c in _closed):
                continue
            n.g = current.g + 1
            xa, ya = n.position
            xb, yb = goal.position
            n.h = (yb - ya) ** 2 + (xb - xa) ** 2
            n.f = n.h + n.g
            # do not re-queue a node for which a cheaper copy is already open
            if any(c == n and c.f < n.f for c in _open):
                continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]
if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"""path from {start.position} to {goal.position}""")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
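# Note (added): the squared-Euclidean heuristic above can overestimate the true
# remaining cost on an 8-connected grid, so this A* variant is not guaranteed to
# return shortest paths. Chebyshev distance is an admissible alternative when
# diagonal moves cost one step; a minimal sketch:
def chebyshev(a: tuple, b: tuple) -> int:
    (xa, ya), (xb, yb) = a, b
    return max(abs(xb - xa), abs(yb - ya))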
| 670 |
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: List[Any] = inspect.getfile(accelerate.test_utils )
UpperCamelCase_: List[str] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_script.py"""] )
UpperCamelCase_: str = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: Any = f'''
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
'''.split()
UpperCamelCase_: Dict = [sys.executable] + distributed_args
execute_subprocess_async(snake_case_ , env=os.environ.copy() )
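# Note (added): the command assembled above mirrors the usual TPU launch pattern,
#   python xla_spawn.py --num_cores 8 path/to/test_script.py
# where xla_spawn.py is expected to fan out one process per TPU core
# (typically via torch_xla's xmp.spawn).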
| 670 | 1 |
def solution() -> str:
    # Sum the self powers 1**1 + 2**2 + ... + 1000**1000 and keep the last ten digits.
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
    print(solution())
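# Equivalent variant (added): only the last ten digits matter, so each self-power
# can be reduced with three-argument pow and the running sum kept modulo 10**10.
def solution_modular() -> str:
    mod = 10**10
    return str(sum(pow(i, i, mod) for i in range(1, 1001)) % mod).zfill(10)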
| 670 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class _UpperCamelCase ( _A , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Optional[int] = BarthezTokenizer
__UpperCamelCase : str = BarthezTokenizerFast
__UpperCamelCase : str = True
__UpperCamelCase : List[Any] = True
def lowerCAmelCase__ ( self : Optional[int] ):
super().setUp()
UpperCamelCase_: Tuple = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=snake_case_ )
UpperCamelCase_: Dict = tokenizer
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: str = """<pad>"""
UpperCamelCase_: int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(snake_case_ ) , 10_1122 )
def lowerCAmelCase__ ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_1122 )
@require_torch
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: Dict = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
UpperCamelCase_: Union[str, Any] = [0, 57, 3018, 7_0307, 91, 2]
UpperCamelCase_: Union[str, Any] = self.tokenizer(
snake_case_ , max_length=len(snake_case_ ) , padding=snake_case_ , truncation=snake_case_ , return_tensors="""pt""" )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
UpperCamelCase_: Any = batch.input_ids.tolist()[0]
self.assertListEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : Any ):
if not self.test_rust_tokenizer:
return
UpperCamelCase_: Optional[Any] = self.get_tokenizer()
UpperCamelCase_: Union[str, Any] = self.get_rust_tokenizer()
UpperCamelCase_: str = """I was born in 92000, and this is falsé."""
UpperCamelCase_: str = tokenizer.tokenize(snake_case_ )
UpperCamelCase_: int = rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase_: int = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
UpperCamelCase_: int = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase_: List[str] = self.get_rust_tokenizer()
UpperCamelCase_: Tuple = tokenizer.encode(snake_case_ )
UpperCamelCase_: Tuple = rust_tokenizer.encode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
@slow
def lowerCAmelCase__ ( self : int ):
# fmt: off
UpperCamelCase_: Optional[Any] = {"""input_ids""": [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
UpperCamelCase_: str = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=snake_case_ , )
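# Note (added): saving the fast tokenizer twice in setUp — once normally and once
# with legacy_format disabled — is meant to leave both the sentencepiece model (for
# the slow class) and tokenizer.json (for the fast class) in tmpdirname, so either
# tokenizer under test can be reloaded from the same directory.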
| 670 | 1 |
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
lowerCamelCase_ : str = logging.get_logger(__name__)
lowerCamelCase_ : Any = {
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""",
# See all BART models at https://huggingface.co/models?filter=bart
}
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : Optional[int] = """bart"""
__UpperCamelCase : List[str] = ["""past_key_values"""]
__UpperCamelCase : List[str] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : List[Any] , snake_case_ : Tuple=5_0265 , snake_case_ : int=1024 , snake_case_ : Any=12 , snake_case_ : Optional[Any]=4096 , snake_case_ : Union[str, Any]=16 , snake_case_ : Any=12 , snake_case_ : List[str]=4096 , snake_case_ : str=16 , snake_case_ : Union[str, Any]=0.0 , snake_case_ : Dict=0.0 , snake_case_ : List[Any]="gelu" , snake_case_ : Union[str, Any]=1024 , snake_case_ : Dict=0.1 , snake_case_ : List[str]=0.0 , snake_case_ : Union[str, Any]=0.0 , snake_case_ : Dict=0.02 , snake_case_ : Optional[Any]=0.0 , snake_case_ : Any=False , snake_case_ : Any=True , snake_case_ : List[Any]=3 , snake_case_ : int=1 , snake_case_ : Optional[Any]=0 , snake_case_ : Any=2 , snake_case_ : Optional[int]=True , snake_case_ : List[str]=2 , snake_case_ : Dict=2 , **snake_case_ : Optional[int] , ):
UpperCamelCase_: Optional[Any] = vocab_size
UpperCamelCase_: Optional[int] = max_position_embeddings
UpperCamelCase_: Dict = d_model
UpperCamelCase_: List[Any] = encoder_ffn_dim
UpperCamelCase_: int = encoder_layers
UpperCamelCase_: Dict = encoder_attention_heads
UpperCamelCase_: Optional[int] = decoder_ffn_dim
UpperCamelCase_: Any = decoder_layers
UpperCamelCase_: Tuple = decoder_attention_heads
UpperCamelCase_: Tuple = dropout
UpperCamelCase_: Tuple = attention_dropout
UpperCamelCase_: Optional[int] = activation_dropout
UpperCamelCase_: Any = activation_function
UpperCamelCase_: Union[str, Any] = init_std
UpperCamelCase_: List[Any] = encoder_layerdrop
UpperCamelCase_: Union[str, Any] = decoder_layerdrop
UpperCamelCase_: str = classifier_dropout
UpperCamelCase_: List[Any] = use_cache
UpperCamelCase_: Optional[int] = encoder_layers
UpperCamelCase_: Optional[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=snake_case_ , pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ , is_encoder_decoder=snake_case_ , decoder_start_token_id=snake_case_ , forced_eos_token_id=snake_case_ , **snake_case_ , )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , snake_case_ ):
UpperCamelCase_: List[str] = self.bos_token_id
warnings.warn(
f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
"""The config can simply be saved and uploaded again to be fixed.""" )
class _UpperCamelCase ( _A ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self : Tuple ):
if self.task in ["default", "seq2seq-lm"]:
UpperCamelCase_: Tuple = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
UpperCamelCase_: int = {0: """batch"""}
UpperCamelCase_: Tuple = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
UpperCamelCase_: Union[str, Any] = {0: """batch""", 1: """decoder_sequence"""}
UpperCamelCase_: Optional[int] = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(snake_case_ , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
UpperCamelCase_: List[Any] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
UpperCamelCase_, UpperCamelCase_: Tuple = self.num_layers
for i in range(snake_case_ ):
UpperCamelCase_: Optional[int] = {0: """batch""", 2: """past_sequence + sequence"""}
UpperCamelCase_: Any = {0: """batch""", 2: """past_sequence + sequence"""}
else:
UpperCamelCase_: Tuple = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
def lowerCAmelCase__ ( self : Union[str, Any] ):
if self.task in ["default", "seq2seq-lm"]:
UpperCamelCase_: Optional[int] = super().outputs
else:
UpperCamelCase_: List[Any] = super(snake_case_ , self ).outputs
if self.use_past:
UpperCamelCase_, UpperCamelCase_: List[str] = self.num_layers
for i in range(snake_case_ ):
UpperCamelCase_: Union[str, Any] = {0: """batch""", 2: """past_sequence + sequence"""}
UpperCamelCase_: List[Any] = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def lowerCAmelCase__ ( self : Dict , snake_case_ : PreTrainedTokenizer , snake_case_ : int = -1 , snake_case_ : int = -1 , snake_case_ : bool = False , snake_case_ : Optional[TensorType] = None , ):
UpperCamelCase_: Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Generate decoder inputs
UpperCamelCase_: Optional[Any] = seq_length if not self.use_past else 1
UpperCamelCase_: str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
UpperCamelCase_: Optional[Any] = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
UpperCamelCase_: Union[str, Any] = dict(**snake_case_ , **snake_case_ )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
UpperCamelCase_, UpperCamelCase_: Tuple = common_inputs["""input_ids"""].shape
UpperCamelCase_: List[str] = common_inputs["""decoder_input_ids"""].shape[1]
UpperCamelCase_, UpperCamelCase_: Union[str, Any] = self.num_attention_heads
UpperCamelCase_: Optional[Any] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCamelCase_: Tuple = decoder_seq_length + 3
UpperCamelCase_: Optional[int] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
UpperCamelCase_: Union[str, Any] = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(snake_case_ , snake_case_ )] , dim=1 )
UpperCamelCase_: List[Any] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
UpperCamelCase_, UpperCamelCase_: int = self.num_layers
UpperCamelCase_: Dict = min(snake_case_ , snake_case_ )
UpperCamelCase_: Union[str, Any] = max(snake_case_ , snake_case_ ) - min_num_layers
UpperCamelCase_: Union[str, Any] = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(snake_case_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(snake_case_ ),
torch.zeros(snake_case_ ),
torch.zeros(snake_case_ ),
torch.zeros(snake_case_ ),
) )
# TODO: test this.
UpperCamelCase_: str = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(snake_case_ , snake_case_ ):
common_inputs["past_key_values"].append((torch.zeros(snake_case_ ), torch.zeros(snake_case_ )) )
return common_inputs
def lowerCAmelCase__ ( self : int , snake_case_ : PreTrainedTokenizer , snake_case_ : int = -1 , snake_case_ : int = -1 , snake_case_ : bool = False , snake_case_ : Optional[TensorType] = None , ):
UpperCamelCase_: int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
UpperCamelCase_, UpperCamelCase_: int = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
UpperCamelCase_: Any = seqlen + 2
UpperCamelCase_, UpperCamelCase_: int = self.num_layers
UpperCamelCase_, UpperCamelCase_: Union[str, Any] = self.num_attention_heads
UpperCamelCase_: Optional[Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCamelCase_: Optional[int] = common_inputs["""attention_mask"""].dtype
UpperCamelCase_: Union[str, Any] = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(snake_case_ , snake_case_ , dtype=snake_case_ )] , dim=1 )
UpperCamelCase_: List[Any] = [
(torch.zeros(snake_case_ ), torch.zeros(snake_case_ )) for _ in range(snake_case_ )
]
return common_inputs
def lowerCAmelCase__ ( self : Dict , snake_case_ : PreTrainedTokenizer , snake_case_ : int = -1 , snake_case_ : int = -1 , snake_case_ : bool = False , snake_case_ : Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCamelCase_: Any = compute_effective_axis_dimension(
snake_case_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCamelCase_: Dict = tokenizer.num_special_tokens_to_add(snake_case_ )
UpperCamelCase_: List[str] = compute_effective_axis_dimension(
snake_case_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=snake_case_ )
# Generate dummy inputs according to compute batch and sequence
UpperCamelCase_: Optional[Any] = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
UpperCamelCase_: Any = dict(tokenizer(snake_case_ , return_tensors=snake_case_ ) )
return common_inputs
def lowerCAmelCase__ ( self : int , snake_case_ : PreTrainedTokenizer , snake_case_ : int = -1 , snake_case_ : int = -1 , snake_case_ : bool = False , snake_case_ : Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
UpperCamelCase_: str = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
snake_case_ , batch_size=snake_case_ , seq_length=snake_case_ , is_pair=snake_case_ , framework=snake_case_ )
elif self.task == "causal-lm":
UpperCamelCase_: Optional[int] = self._generate_dummy_inputs_for_causal_lm(
snake_case_ , batch_size=snake_case_ , seq_length=snake_case_ , is_pair=snake_case_ , framework=snake_case_ )
else:
UpperCamelCase_: Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
snake_case_ , batch_size=snake_case_ , seq_length=snake_case_ , is_pair=snake_case_ , framework=snake_case_ )
return common_inputs
def lowerCAmelCase__ ( self : Union[str, Any] , snake_case_ : Optional[Any] , snake_case_ : Optional[int] , snake_case_ : Optional[int] , snake_case_ : Optional[int] ):
if self.task in ["default", "seq2seq-lm"]:
UpperCamelCase_: Tuple = super()._flatten_past_key_values_(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
else:
UpperCamelCase_: List[str] = super(snake_case_ , self )._flatten_past_key_values_(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
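# Reference (added): configs like this one drive the command-line exporter; a typical
# invocation for a seq2seq BART checkpoint would be
#   python -m transformers.onnx --model=facebook/bart-base --feature=seq2seq-lm onnx/
# where --feature selects among the "default"/"causal-lm"/"seq2seq-lm" branches of
# the dummy-input generation above.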
| 670 |
def add(first: int, second: int) -> int:
    # Repeatedly fold the carry back in: XOR adds without carries, AND finds
    # the carry bits, and the left shift moves the carry to its proper place.
    while second != 0:
        carry = first & second
        first ^= second
        second = carry << 1
    return first
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    first = int(input("""Enter the first number: """).strip())
    second = int(input("""Enter the second number: """).strip())
    print(F"""{add(first, second) = }""")
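# Added variant: the carry loop above never terminates for a negative second operand,
# because Python integers are unbounded. Emulating 32-bit two's complement with a
# mask handles signed inputs:
def add_signed(a: int, b: int) -> int:
    mask = 0xFFFFFFFF
    a, b = a & mask, b & mask
    while b:
        a, b = (a ^ b) & mask, ((a & b) << 1) & mask
    return a if a <= 0x7FFFFFFF else ~(a ^ mask)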
| 670 | 1 |
def perfect_cube(n: int) -> bool:
    # Check whether n is a perfect cube via its floating-point cube root.
    val = n ** (1 / 3)
    return (val * val * val) == n
if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
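# Added variant: floating-point cube roots are inexact (64 ** (1 / 3) evaluates to
# 3.9999999999999996), so the exact comparison above can misreport some inputs.
# Rounding to the nearest integer root is robust for moderately sized n:
def perfect_cube_exact(n: int) -> bool:
    root = round(abs(n) ** (1 / 3))
    return root * root * root == abs(n)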
| 670 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
lowerCamelCase_ : List[str] = logging.getLogger(__name__)
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
__UpperCamelCase : bool = field(
default=_A , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
__UpperCamelCase : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
__UpperCamelCase : bool = field(
default=_A , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : Optional[str] = field(default=_A , metadata={"""help""": """The input training data file (a text file)."""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
__UpperCamelCase : bool = field(
default=_A , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. If passed, sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__UpperCamelCase : bool = field(
default=_A , metadata={
"""help""": (
"""Whether to pad all samples to the maximum sentence length. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch. More """
"""efficient on GPU but very bad for TPU."""
)
} , )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def lowerCAmelCase__ ( self : Dict ):
if self.train_file is not None:
UpperCamelCase_: Union[str, Any] = self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
UpperCamelCase_: Dict = self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : PreTrainedTokenizerBase
__UpperCamelCase : Union[bool, str, PaddingStrategy] = True
__UpperCamelCase : Optional[int] = None
__UpperCamelCase : Optional[int] = None
def __call__( self : Optional[int] , snake_case_ : Dict ):
UpperCamelCase_: Dict = """label""" if """label""" in features[0].keys() else """labels"""
UpperCamelCase_: int = [feature.pop(snake_case_ ) for feature in features]
UpperCamelCase_: Optional[Any] = len(snake_case_ )
UpperCamelCase_: List[str] = len(features[0]["""input_ids"""] )
UpperCamelCase_: Tuple = [
[{k: v[i] for k, v in feature.items()} for i in range(snake_case_ )] for feature in features
]
UpperCamelCase_: Any = list(chain(*snake_case_ ) )
UpperCamelCase_: List[Any] = self.tokenizer.pad(
snake_case_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
# Un-flatten
UpperCamelCase_: Tuple = {k: v.view(snake_case_ , snake_case_ , -1 ) for k, v in batch.items()}
# Add back labels
        UpperCamelCase_: Optional[int] = torch.tensor(snake_case_ , dtype=torch.int64 )
return batch
def A__ ( ) -> Tuple:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase_: str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: List[str] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_swag""" , lowerCamelCase , lowerCamelCase )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase_: Dict = training_args.get_process_log_level()
logger.setLevel(lowerCamelCase )
datasets.utils.logging.set_verbosity(lowerCamelCase )
transformers.utils.logging.set_verbosity(lowerCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
UpperCamelCase_: List[str] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase_: List[str] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
UpperCamelCase_: List[str] = {}
if data_args.train_file is not None:
UpperCamelCase_: List[Any] = data_args.train_file
if data_args.validation_file is not None:
UpperCamelCase_: Optional[int] = data_args.validation_file
UpperCamelCase_: Any = data_args.train_file.split(""".""" )[-1]
UpperCamelCase_: Tuple = load_dataset(
lowerCamelCase , data_files=lowerCamelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
UpperCamelCase_: int = load_dataset(
"""swag""" , """regular""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase_: Optional[int] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase_: Union[str, Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase_: List[str] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
UpperCamelCase_: Union[str, Any] = [F'''ending{i}''' for i in range(4 )]
UpperCamelCase_: str = """sent1"""
UpperCamelCase_: List[str] = """sent2"""
if data_args.max_seq_length is None:
UpperCamelCase_: int = tokenizer.model_max_length
if max_seq_length > 10_24:
logger.warning(
"""The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"""
""" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"""
""" override this default with `--block_size xxx`.""" )
UpperCamelCase_: Optional[Any] = 10_24
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
UpperCamelCase_: Union[str, Any] = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(lowerCamelCase ):
UpperCamelCase_: Optional[Any] = [[context] * 4 for context in examples[context_name]]
UpperCamelCase_: Dict = examples[question_header_name]
UpperCamelCase_: List[str] = [
[F'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(lowerCamelCase )
]
# Flatten out
UpperCamelCase_: str = list(chain(*lowerCamelCase ) )
UpperCamelCase_: Any = list(chain(*lowerCamelCase ) )
# Tokenize
UpperCamelCase_: Any = tokenizer(
lowerCamelCase , lowerCamelCase , truncation=lowerCamelCase , max_length=lowerCamelCase , padding="""max_length""" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(lowerCamelCase ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
UpperCamelCase_: str = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
UpperCamelCase_: Union[str, Any] = min(len(lowerCamelCase ) , data_args.max_train_samples )
UpperCamelCase_: Optional[int] = train_dataset.select(range(lowerCamelCase ) )
with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
UpperCamelCase_: str = train_dataset.map(
lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
UpperCamelCase_: Dict = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
UpperCamelCase_: str = min(len(lowerCamelCase ) , data_args.max_eval_samples )
UpperCamelCase_: Tuple = eval_dataset.select(range(lowerCamelCase ) )
with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
UpperCamelCase_: str = eval_dataset.map(
lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
UpperCamelCase_: str = (
default_data_collator
if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=lowerCamelCase , pad_to_multiple_of=8 if training_args.fp16 else None )
)
# Metric
def compute_metrics(lowerCamelCase ):
UpperCamelCase_, UpperCamelCase_: List[str] = eval_predictions
UpperCamelCase_: Optional[Any] = np.argmax(lowerCamelCase , axis=1 )
        return {"accuracy": (preds == label_ids).astype(np.float32 ).mean().item()}
# Initialize our Trainer
UpperCamelCase_: Union[str, Any] = Trainer(
model=lowerCamelCase , args=lowerCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowerCamelCase , data_collator=lowerCamelCase , compute_metrics=lowerCamelCase , )
# Training
if training_args.do_train:
UpperCamelCase_: List[Any] = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase_: int = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase_: str = last_checkpoint
UpperCamelCase_: Optional[Any] = trainer.train(resume_from_checkpoint=lowerCamelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
UpperCamelCase_: Tuple = train_result.metrics
UpperCamelCase_: Tuple = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase )
)
UpperCamelCase_: Optional[Any] = min(lowerCamelCase , len(lowerCamelCase ) )
trainer.log_metrics("""train""" , lowerCamelCase )
trainer.save_metrics("""train""" , lowerCamelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
UpperCamelCase_: Optional[Any] = trainer.evaluate()
UpperCamelCase_: Tuple = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase )
UpperCamelCase_: Optional[Any] = min(lowerCamelCase , len(lowerCamelCase ) )
trainer.log_metrics("""eval""" , lowerCamelCase )
trainer.save_metrics("""eval""" , lowerCamelCase )
UpperCamelCase_: Optional[int] = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """multiple-choice""",
"""dataset_tags""": """swag""",
"""dataset_args""": """regular""",
"""dataset""": """SWAG""",
"""language""": """en""",
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCamelCase )
else:
trainer.create_model_card(**lowerCamelCase )
def A__ ( lowerCamelCase ) -> Tuple:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
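# Representative launch command (added; flags come from the dataclasses above plus
# standard TrainingArguments):
#   python run_swag.py \
#     --model_name_or_path bert-base-uncased \
#     --do_train --do_eval \
#     --max_seq_length 80 \
#     --per_device_train_batch_size 16 \
#     --learning_rate 5e-5 \
#     --num_train_epochs 3 \
#     --output_dir /tmp/swag_out --overwrite_output_dir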
| 670 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ : Tuple = logging.get_logger(__name__)
lowerCamelCase_ : Union[str, Any] = {
"""uclanlp/visualbert-vqa""": """https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-pre""": """https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-vcr""": """https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-pre""": """https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-nlvr2""": """https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-pre""": """https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"""
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : Dict = """visual_bert"""
def __init__( self : Any , snake_case_ : List[Any]=3_0522 , snake_case_ : int=768 , snake_case_ : List[Any]=512 , snake_case_ : int=12 , snake_case_ : Tuple=12 , snake_case_ : Optional[Any]=3072 , snake_case_ : Union[str, Any]="gelu" , snake_case_ : Optional[Any]=0.1 , snake_case_ : Any=0.1 , snake_case_ : Optional[Any]=512 , snake_case_ : Tuple=2 , snake_case_ : Union[str, Any]=0.02 , snake_case_ : Optional[int]=1e-12 , snake_case_ : Any=False , snake_case_ : Any=True , snake_case_ : Union[str, Any]=1 , snake_case_ : int=0 , snake_case_ : str=2 , **snake_case_ : Optional[int] , ):
super().__init__(pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ )
UpperCamelCase_: List[str] = vocab_size
UpperCamelCase_: List[str] = max_position_embeddings
UpperCamelCase_: List[str] = hidden_size
UpperCamelCase_: List[str] = visual_embedding_dim
UpperCamelCase_: str = num_hidden_layers
UpperCamelCase_: Tuple = num_attention_heads
UpperCamelCase_: List[str] = intermediate_size
UpperCamelCase_: str = hidden_act
UpperCamelCase_: int = hidden_dropout_prob
UpperCamelCase_: Optional[int] = attention_probs_dropout_prob
UpperCamelCase_: Tuple = initializer_range
UpperCamelCase_: List[Any] = type_vocab_size
UpperCamelCase_: int = layer_norm_eps
UpperCamelCase_: Union[str, Any] = bypass_transformer
UpperCamelCase_: Optional[int] = special_visual_initialize
| 670 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase_ : Union[str, Any] = logging.getLogger()
lowerCamelCase_ : List[str] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _UpperCamelCase ( _A ):
'''simple docstring'''
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : Dict ):
os.makedirs(snake_case_ , exist_ok=snake_case_ )
UpperCamelCase_: int = {"""source""": """What is love ?""", """target""": """life"""}
UpperCamelCase_: Tuple = {"""train""": 12, """val""": 2, """test""": 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
UpperCamelCase_: Tuple = """\n""".join([contents[field]] * n_lines[split] )
with open(os.path.join(snake_case_ , f'''{split}.{field}''' ) , """w""" ) as f:
f.write(snake_case_ )
def lowerCAmelCase__ ( self : Dict , snake_case_ : int , snake_case_ : str = "pytorch" ):
UpperCamelCase_: Optional[Any] = self.get_auto_remove_tmp_dir()
UpperCamelCase_: Dict = os.path.join(snake_case_ , """output""" )
UpperCamelCase_: Any = os.path.join(snake_case_ , """data""" )
self._create_dummy_data(data_dir=snake_case_ )
UpperCamelCase_: Union[str, Any] = f'''
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
'''.split()
if gpus > 0:
testargs.append(f'''--gpus={gpus}''' )
if is_apex_available():
testargs.append("""--fp16""" )
else:
testargs.append("""--gpus=0""" )
testargs.append("""--distributed_backend=ddp_cpu""" )
testargs.append("""--num_processes=2""" )
UpperCamelCase_: Optional[Any] = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(snake_case_ , env=self.get_env() )
UpperCamelCase_: Optional[int] = os.path.join(snake_case_ , """metrics.json""" )
with open(snake_case_ ) as f:
UpperCamelCase_: Any = json.load(snake_case_ )
return result
@require_torch_gpu
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: List[str] = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: Any = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_gpu
@require_ray
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: List[Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
@require_ray
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: List[Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
| 670 | 1 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class _UpperCamelCase ( _A ):
'''simple docstring'''
def __init__( self : int , snake_case_ : pyspark.sql.DataFrame , snake_case_ : Optional[NamedSplit] = None , snake_case_ : Optional[Features] = None , snake_case_ : bool = True , snake_case_ : str = None , snake_case_ : bool = False , snake_case_ : str = None , snake_case_ : bool = True , snake_case_ : str = "arrow" , **snake_case_ : Any , ):
super().__init__(
split=snake_case_ , features=snake_case_ , cache_dir=snake_case_ , keep_in_memory=snake_case_ , streaming=snake_case_ , **snake_case_ , )
UpperCamelCase_: int = load_from_cache_file
UpperCamelCase_: str = file_format
UpperCamelCase_: List[Any] = Spark(
df=snake_case_ , features=snake_case_ , cache_dir=snake_case_ , working_dir=snake_case_ , **snake_case_ , )
def lowerCAmelCase__ ( self : Optional[Any] ):
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
UpperCamelCase_: Optional[Any] = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=snake_case_ , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
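# Usage sketch (hedged): the reader above backs `Dataset.from_spark`, the
# public entry point in `datasets`. The session and column names below are
# illustrative, not taken from this file.
#
#   from datasets import Dataset
#   from pyspark.sql import SparkSession
#
#   spark = SparkSession.builder.appName("demo").getOrCreate()
#   df = spark.createDataFrame([("hello",), ("world",)], ["text"])
#   ds = Dataset.from_spark(df)  # materialises the DataFrame as an Arrow-backed Dataset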
| 670 |
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : List[str] , snake_case_ : int , snake_case_ : Optional[Any]=None , snake_case_ : List[str]=None ):
UpperCamelCase_: List[Any] = data
UpperCamelCase_: List[Any] = previous
UpperCamelCase_: Tuple = next_node
def __str__( self : Dict ):
return f'''{self.data}'''
def lowerCAmelCase__ ( self : List[str] ):
return self.data
def lowerCAmelCase__ ( self : Any ):
return self.next
def lowerCAmelCase__ ( self : List[str] ):
return self.previous
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , snake_case_ : int ):
UpperCamelCase_: Union[str, Any] = head
def __iter__( self : Union[str, Any] ):
return self
def lowerCAmelCase__ ( self : Union[str, Any] ):
if not self.current:
raise StopIteration
else:
UpperCamelCase_: Dict = self.current.get_data()
UpperCamelCase_: Tuple = self.current.get_next()
return value
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : int ):
UpperCamelCase_: Optional[int] = None # First node in list
UpperCamelCase_: Dict = None # Last node in list
def __str__( self : Tuple ):
UpperCamelCase_: int = self.head
UpperCamelCase_: Tuple = []
while current is not None:
nodes.append(current.get_data() )
UpperCamelCase_: List[str] = current.get_next()
return " ".join(str(snake_case_ ) for node in nodes )
def __contains__( self : int , snake_case_ : int ):
UpperCamelCase_: Optional[Any] = self.head
while current:
if current.get_data() == value:
return True
UpperCamelCase_: Any = current.get_next()
return False
def __iter__( self : Any ):
return LinkedListIterator(self.head )
def lowerCAmelCase__ ( self : Tuple ):
if self.head:
return self.head.get_data()
return None
def lowerCAmelCase__ ( self : Optional[Any] ):
if self.tail:
return self.tail.get_data()
return None
def lowerCAmelCase__ ( self : Optional[int] , snake_case_ : Node ):
if self.head is None:
UpperCamelCase_: Tuple = node
UpperCamelCase_: Optional[int] = node
else:
self.insert_before_node(self.head , snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : Node ):
if self.head is None:
self.set_head(snake_case_ )
else:
self.insert_after_node(self.tail , snake_case_ )
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : int ):
UpperCamelCase_: Any = Node(snake_case_ )
if self.head is None:
self.set_head(snake_case_ )
else:
self.set_tail(snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : Node , snake_case_ : Node ):
UpperCamelCase_: str = node
UpperCamelCase_: int = node.previous
if node.get_previous() is None:
UpperCamelCase_: int = node_to_insert
else:
UpperCamelCase_: Dict = node_to_insert
UpperCamelCase_: int = node_to_insert
def lowerCAmelCase__ ( self : Dict , snake_case_ : Node , snake_case_ : Node ):
UpperCamelCase_: Tuple = node
UpperCamelCase_: Dict = node.next
if node.get_next() is None:
UpperCamelCase_: Union[str, Any] = node_to_insert
else:
UpperCamelCase_: str = node_to_insert
UpperCamelCase_: int = node_to_insert
def lowerCAmelCase__ ( self : Tuple , snake_case_ : int , snake_case_ : int ):
UpperCamelCase_: Union[str, Any] = 1
UpperCamelCase_: List[str] = Node(snake_case_ )
UpperCamelCase_: Optional[Any] = self.head
while node:
if current_position == position:
self.insert_before_node(snake_case_ , snake_case_ )
return
current_position += 1
UpperCamelCase_: Dict = node.next
self.insert_after_node(self.tail , snake_case_ )
def lowerCAmelCase__ ( self : int , snake_case_ : int ):
UpperCamelCase_: Union[str, Any] = self.head
while node:
if node.get_data() == item:
return node
UpperCamelCase_: List[Any] = node.get_next()
raise Exception("""Node not found""" )
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : List[str] ):
if (node := self.get_node(snake_case_ )) is not None:
if node == self.head:
UpperCamelCase_: Optional[int] = self.head.get_next()
if node == self.tail:
UpperCamelCase_: Union[str, Any] = self.tail.get_previous()
self.remove_node_pointers(snake_case_ )
@staticmethod
def lowerCAmelCase__ ( snake_case_ : Node ):
if node.get_next():
UpperCamelCase_: str = node.previous
if node.get_previous():
UpperCamelCase_: int = node.next
UpperCamelCase_: List[str] = None
UpperCamelCase_: int = None
def lowerCAmelCase__ ( self : str ):
return self.head is None
def A__ ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
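# Usage sketch (hedged): assuming the de-obfuscated names Node and LinkedList
# for the first and third classes above, a session could look like:
#
#   ll = LinkedList()
#   ll.set_head(Node(1))                        # list: 1
#   ll.set_tail(Node(3))                        # list: 1 3
#   ll.insert_at_position(position=2, value=2)  # list: 1 2 3 (positions are 1-indexed)
#   str(ll)                                     # '1 2 3'
#   ll.delete_value(2)                          # list: 1 3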
| 670 | 1 |
lowerCamelCase_ : List[Any] = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Dict:
    # BFS from the source: return True if the sink t is still reachable
    # through edges with remaining capacity (i.e. an augmenting path exists).
UpperCamelCase_: List[Any] = [False] * len(lowerCamelCase )
UpperCamelCase_: int = [s]
UpperCamelCase_: Tuple = True
while queue:
UpperCamelCase_: str = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(lowerCamelCase )
UpperCamelCase_: str = True
UpperCamelCase_: Union[str, Any] = u
return visited[t]
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> List[str]:
UpperCamelCase_: Any = [-1] * (len(lowerCamelCase ))
UpperCamelCase_: Tuple = 0
UpperCamelCase_: Union[str, Any] = []
    UpperCamelCase_: Union[str, Any] = [i[:] for i in graph] # Copy the original capacities so saturated edges can be identified later.
while bfs(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCamelCase_: List[str] = float("""Inf""" )
UpperCamelCase_: Any = sink
while s != source:
            # Find the minimum residual capacity along the augmenting path
UpperCamelCase_: List[str] = min(lowerCamelCase , graph[parent[s]][s] )
UpperCamelCase_: List[str] = parent[s]
max_flow += path_flow
UpperCamelCase_: int = sink
while v != source:
UpperCamelCase_: List[str] = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
UpperCamelCase_: str = parent[v]
for i in range(len(lowerCamelCase ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
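# Hedged note: the capacity matrix above is the classic CLRS max-flow example,
# whose maximum 0 -> 5 flow is 23. By the max-flow/min-cut theorem the minimum
# cut also has capacity 23: the partition {0, 1, 2, 4} | {3, 5} with crossing
# edges (1, 3), (4, 3) and (4, 5). Every min-cut edge is saturated at max flow,
# so the list printed above contains these three pairs (possibly among others).
assert test_graph[1][3] + test_graph[4][3] + test_graph[4][5] == 23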
| 670 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase_ : List[str] = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[Any] = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 670 | 1 |
from manim import *
class _UpperCamelCase ( _A ):
'''simple docstring'''
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: Dict = Rectangle(height=0.5 , width=0.5 )
UpperCamelCase_: Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCamelCase_: Tuple = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Tuple = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Union[str, Any] = VGroup(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Optional[Any] = Text("""CPU""" , font_size=24 )
UpperCamelCase_: int = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(snake_case_ )
UpperCamelCase_: Optional[int] = [mem.copy() for i in range(1 )]
UpperCamelCase_: Dict = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Optional[int] = Text("""GPU""" , font_size=24 )
UpperCamelCase_: Optional[int] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
gpu.align_to(snake_case_ , snake_case_ )
gpu.set_x(gpu.get_x() - 1 )
self.add(snake_case_ )
UpperCamelCase_: Dict = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Any = Text("""Model""" , font_size=24 )
UpperCamelCase_: Optional[Any] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
model.move_to([3, -1.0, 0] )
self.play(
Create(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) , )
UpperCamelCase_: List[Any] = MarkupText(
f'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' , font_size=24 , )
UpperCamelCase_: Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCamelCase_: Union[str, Any] = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case_ , run_time=2.5 ) , Write(snake_case_ ) , Write(snake_case_ ) )
self.add(snake_case_ )
UpperCamelCase_: Union[str, Any] = []
UpperCamelCase_: Union[str, Any] = []
UpperCamelCase_: Tuple = []
for i, rect in enumerate(snake_case_ ):
UpperCamelCase_: Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(snake_case_ , opacity=0.7 )
cpu_target.move_to(snake_case_ )
cpu_target.generate_target()
UpperCamelCase_: int = 0.46 / 4
UpperCamelCase_: Optional[int] = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=snake_case_ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=snake_case_ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=snake_case_ , buff=0.0 )
cpu_targs.append(snake_case_ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(snake_case_ ) )
second_animations.append(MoveToTarget(snake_case_ , run_time=1.5 ) )
self.play(*snake_case_ )
self.play(*snake_case_ )
self.wait()
| 670 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self : int ):
torch.manual_seed(0 )
UpperCamelCase_: Any = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
@property
def lowerCAmelCase__ ( self : Union[str, Any] ):
torch.manual_seed(0 )
UpperCamelCase_: Union[str, Any] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , )
return model
@property
def lowerCAmelCase__ ( self : Any ):
torch.manual_seed(0 )
UpperCamelCase_: List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(snake_case_ )
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: Union[str, Any] = self.dummy_uncond_unet
UpperCamelCase_: Optional[Any] = DDIMScheduler()
UpperCamelCase_: List[str] = self.dummy_vq_model
UpperCamelCase_: List[Any] = LDMPipeline(unet=snake_case_ , vqvae=snake_case_ , scheduler=snake_case_ )
ldm.to(snake_case_ )
ldm.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: str = torch.manual_seed(0 )
UpperCamelCase_: int = ldm(generator=snake_case_ , num_inference_steps=2 , output_type="""numpy""" ).images
UpperCamelCase_: Dict = torch.manual_seed(0 )
UpperCamelCase_: str = ldm(generator=snake_case_ , num_inference_steps=2 , output_type="""numpy""" , return_dict=snake_case_ )[0]
UpperCamelCase_: List[Any] = image[0, -3:, -3:, -1]
UpperCamelCase_: Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase_: str = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
UpperCamelCase_: Optional[Any] = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: Dict = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
ldm.to(snake_case_ )
ldm.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: List[str] = torch.manual_seed(0 )
UpperCamelCase_: Optional[int] = ldm(generator=snake_case_ , num_inference_steps=5 , output_type="""numpy""" ).images
UpperCamelCase_: List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCamelCase_: List[str] = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] )
UpperCamelCase_: Dict = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 670 | 1 |
import math
def A__ ( lowerCamelCase , lowerCamelCase ) -> float:
return math.pow(lowerCamelCase , 2 ) - a
def A__ ( lowerCamelCase ) -> float:
return 2 * x
def A__ ( lowerCamelCase ) -> float:
UpperCamelCase_: str = 2.0
while start <= a:
UpperCamelCase_: Union[str, Any] = math.pow(lowerCamelCase , 2 )
return start
def A__ ( lowerCamelCase , lowerCamelCase = 99_99 , lowerCamelCase = 0.00000000000001 ) -> float:
if a < 0:
raise ValueError("""math domain error""" )
UpperCamelCase_: Optional[int] = get_initial_point(lowerCamelCase )
for _ in range(lowerCamelCase ):
UpperCamelCase_: Any = value
UpperCamelCase_: List[Any] = value - fx(lowerCamelCase , lowerCamelCase ) / fx_derivative(lowerCamelCase )
if abs(prev_value - value ) < tolerance:
return value
return value
if __name__ == "__main__":
from doctest import testmod
testmod()
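# Worked check (hedged, standalone: it does not call the helpers above):
# Newton's iteration x_{n+1} = x_n - (x_n**2 - a) / (2 * x_n) with a = 5,
# started from 4.0 >= sqrt(5), converges to sqrt(5) ~ 2.2360679...
_x = 4.0
for _ in range(20):
    _x -= (_x * _x - 5.0) / (2.0 * _x)
assert abs(_x - 5 ** 0.5) < 1e-12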
| 670 |
def A__ ( lowerCamelCase = 50 ) -> int:
UpperCamelCase_: List[Any] = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 670 | 1 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : List[str] = """M-CLIP"""
def __init__( self : Optional[Any] , snake_case_ : Union[str, Any]=1024 , snake_case_ : Any=768 , **snake_case_ : str ):
UpperCamelCase_: Optional[Any] = transformerDimSize
UpperCamelCase_: List[Any] = imageDimSize
super().__init__(**snake_case_ )
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : List[str] = MCLIPConfig
def __init__( self : List[Any] , snake_case_ : Tuple , *snake_case_ : List[Any] , **snake_case_ : List[str] ):
super().__init__(snake_case_ , *snake_case_ , **snake_case_ )
UpperCamelCase_: Optional[int] = XLMRobertaModel(snake_case_ )
UpperCamelCase_: Tuple = torch.nn.Linear(
in_features=config.transformerDimensions , out_features=config.numDims )
def lowerCAmelCase__ ( self : Tuple , snake_case_ : Optional[int] , snake_case_ : List[Any] ):
UpperCamelCase_: int = self.transformer(input_ids=snake_case_ , attention_mask=snake_case_ )[0]
UpperCamelCase_: List[Any] = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(snake_case_ ), embs
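# Usage sketch (hedged: the checkpoint name is illustrative and
# MultilingualCLIP is the assumed de-obfuscated name of the class above):
#
#   from transformers import AutoTokenizer
#
#   tok = AutoTokenizer.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-B-32")
#   model = MultilingualCLIP.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-B-32")
#   batch = tok(["a photo of a dog", "ein Foto eines Hundes"], padding=True, return_tensors="pt")
#   text_embeds, _ = model(batch["input_ids"], batch["attention_mask"])  # mean-pooled, then projected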
| 670 |
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Union[str, Any]:
# Initialise PyTorch model
UpperCamelCase_: List[Any] = TaConfig.from_json_file(lowerCamelCase )
print(F'''Building PyTorch model from configuration: {config}''' )
UpperCamelCase_: Any = TaForConditionalGeneration(lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_ta(lowerCamelCase , lowerCamelCase , lowerCamelCase )
    # Save the PyTorch model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
lowerCamelCase_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCamelCase_ : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
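# Invocation sketch (hedged: the script filename and all paths are placeholders):
#
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/t5/model.ckpt \
#       --config_file /path/to/t5/config.json \
#       --pytorch_dump_path /path/to/pytorch_dump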
| 670 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
lowerCamelCase_ : Optional[Any] = """Create a default config file for Accelerate with only a few flags set."""
def A__ ( lowerCamelCase="no" , lowerCamelCase = default_json_config_file , lowerCamelCase = False ) -> Union[str, Any]:
UpperCamelCase_: Any = Path(lowerCamelCase )
path.parent.mkdir(parents=lowerCamelCase , exist_ok=lowerCamelCase )
if path.exists():
print(
F'''Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.''' )
return False
UpperCamelCase_: int = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F'''`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}''' )
UpperCamelCase_: int = {
"""compute_environment""": """LOCAL_MACHINE""",
"""mixed_precision""": mixed_precision,
}
if torch.cuda.is_available():
UpperCamelCase_: Dict = torch.cuda.device_count()
UpperCamelCase_: Any = num_gpus
UpperCamelCase_: List[Any] = False
if num_gpus > 1:
UpperCamelCase_: List[Any] = """MULTI_GPU"""
else:
UpperCamelCase_: Tuple = """NO"""
elif is_xpu_available() and use_xpu:
UpperCamelCase_: Union[str, Any] = torch.xpu.device_count()
UpperCamelCase_: Dict = num_xpus
UpperCamelCase_: List[str] = False
if num_xpus > 1:
UpperCamelCase_: int = """MULTI_XPU"""
else:
UpperCamelCase_: Optional[Any] = """NO"""
elif is_npu_available():
UpperCamelCase_: List[str] = torch.npu.device_count()
UpperCamelCase_: List[str] = num_npus
UpperCamelCase_: Tuple = False
if num_npus > 1:
UpperCamelCase_: Union[str, Any] = """MULTI_NPU"""
else:
UpperCamelCase_: Union[str, Any] = """NO"""
else:
UpperCamelCase_: str = 0
UpperCamelCase_: Dict = True
UpperCamelCase_: str = 1
UpperCamelCase_: Dict = """NO"""
UpperCamelCase_: Union[str, Any] = ClusterConfig(**lowerCamelCase )
config.to_json_file(lowerCamelCase )
return path
def A__ ( lowerCamelCase , lowerCamelCase ) -> int:
UpperCamelCase_: Optional[Any] = parser.add_parser("""default""" , parents=lowerCamelCase , help=lowerCamelCase , formatter_class=lowerCamelCase )
parser.add_argument(
"""--config_file""" , default=lowerCamelCase , help=(
"""The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
"""location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
"""such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
"""with 'huggingface'."""
) , dest="""save_location""" , )
parser.add_argument(
"""--mixed_precision""" , choices=["""no""", """fp16""", """bf16"""] , type=lowerCamelCase , help="""Whether or not to use mixed precision training. """
"""Choose between FP16 and BF16 (bfloat16) training. """
"""BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.""" , default="""no""" , )
parser.set_defaults(func=lowerCamelCase )
return parser
def A__ ( lowerCamelCase ) -> Any:
UpperCamelCase_: Union[str, Any] = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(F'''accelerate configuration saved at {config_file}''' )
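# Usage sketch (hedged): the helper is also reachable programmatically; the
# import below assumes the `accelerate.utils` re-export.
#
#   from accelerate.utils import write_basic_config
#
#   write_basic_config(mixed_precision="fp16")  # writes default_config.json under the HF cache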
| 670 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ : str = {
"""configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
"""tokenization_roformer""": ["""RoFormerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Union[str, Any] = ["""RoFormerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Any = [
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Dict = [
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[Any] = [
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
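# Minimal sketch (hedged, not the real _LazyModule) of the deferred-import
# idea the block above wires up, using module-level __getattr__ (PEP 562):
#
#   import importlib
#
#   _ATTR_TO_SUBMODULE = {"RoFormerModel": "modeling_roformer"}
#
#   def __getattr__(name):
#       if name in _ATTR_TO_SUBMODULE:
#           module = importlib.import_module("." + _ATTR_TO_SUBMODULE[name], __name__)
#           return getattr(module, name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")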
| 670 | 1 |
import re
def A__ ( lowerCamelCase ) -> bool:
UpperCamelCase_: Tuple = re.compile(r"""^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$""" )
if match := re.search(lowerCamelCase , lowerCamelCase ):
return match.string == phone
return False
if __name__ == "__main__":
print(indian_phone_validator("""+918827897895"""))
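# A few more spot checks of the pattern (verified against the regex above):
#   indian_phone_validator("+91 9876543210")  -> True   ("+91", separator, then 10 digits)
#   indian_phone_validator("9876543210")      -> True   (bare 10-digit number starting with 9)
#   indian_phone_validator("123456789")       -> False  (9 digits, and doesn't start with 7/8/9)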
| 670 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase = "x" , lowerCamelCase = 10**-10 , lowerCamelCase = 1 , ) -> complex:
UpperCamelCase_: Optional[Any] = symbols(lowerCamelCase )
UpperCamelCase_: int = lambdify(lowerCamelCase , lowerCamelCase )
UpperCamelCase_: Optional[Any] = lambdify(lowerCamelCase , diff(lowerCamelCase , lowerCamelCase ) )
UpperCamelCase_: Tuple = starting_point
while True:
if diff_function(lowerCamelCase ) != 0:
UpperCamelCase_: List[Any] = prev_guess - multiplicity * func(lowerCamelCase ) / diff_function(
lowerCamelCase )
else:
raise ZeroDivisionError("""Could not find root""" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
UpperCamelCase_: Any = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(F"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}""")
# Find value of e
print(
"""The root of log(y) - 1 = 0 is """,
F"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
"""The root of exp(x) - 1 = 0 is""",
F"""{newton_raphson('exp(x) - 1', 10, precision=0.005)}""",
)
# Find root of cos(x)
print(F"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 670 | 1 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Union[str, Any] = """laion/clap-htsat-unfused"""
UpperCamelCase_: List[str] = tempfile.mkdtemp()
def lowerCAmelCase__ ( self : Tuple , **snake_case_ : Optional[Any] ):
return RobertaTokenizer.from_pretrained(self.checkpoint , **snake_case_ )
def lowerCAmelCase__ ( self : str , **snake_case_ : Any ):
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **snake_case_ )
def lowerCAmelCase__ ( self : Tuple ):
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: Union[str, Any] = self.get_tokenizer()
UpperCamelCase_: int = self.get_feature_extractor()
UpperCamelCase_: List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_: Any = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: Any = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_: Optional[int] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
UpperCamelCase_: Dict = self.get_feature_extractor(do_normalize=snake_case_ , padding_value=1.0 )
UpperCamelCase_: List[str] = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=snake_case_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: int = self.get_feature_extractor()
UpperCamelCase_: Optional[Any] = self.get_tokenizer()
UpperCamelCase_: Dict = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
UpperCamelCase_: Optional[Any] = floats_list((3, 1000) )
UpperCamelCase_: List[str] = feature_extractor(snake_case_ , return_tensors="""np""" )
UpperCamelCase_: int = processor(audios=snake_case_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: List[Any] = self.get_feature_extractor()
UpperCamelCase_: List[str] = self.get_tokenizer()
UpperCamelCase_: List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
UpperCamelCase_: Dict = """This is a test string"""
UpperCamelCase_: Tuple = processor(text=snake_case_ )
UpperCamelCase_: Optional[int] = tokenizer(snake_case_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: List[str] = self.get_feature_extractor()
UpperCamelCase_: Any = self.get_tokenizer()
UpperCamelCase_: Optional[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
UpperCamelCase_: str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase_: Tuple = processor.batch_decode(snake_case_ )
UpperCamelCase_: str = tokenizer.batch_decode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Any = self.get_feature_extractor()
UpperCamelCase_: str = self.get_tokenizer()
UpperCamelCase_: Optional[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
| 670 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ : Optional[Any] = {
"""configuration_distilbert""": [
"""DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""DistilBertConfig""",
"""DistilBertOnnxConfig""",
],
"""tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[Any] = ["""DistilBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[int] = [
"""DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DistilBertForMaskedLM""",
"""DistilBertForMultipleChoice""",
"""DistilBertForQuestionAnswering""",
"""DistilBertForSequenceClassification""",
"""DistilBertForTokenClassification""",
"""DistilBertModel""",
"""DistilBertPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[str] = [
"""TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDistilBertForMaskedLM""",
"""TFDistilBertForMultipleChoice""",
"""TFDistilBertForQuestionAnswering""",
"""TFDistilBertForSequenceClassification""",
"""TFDistilBertForTokenClassification""",
"""TFDistilBertMainLayer""",
"""TFDistilBertModel""",
"""TFDistilBertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Dict = [
"""FlaxDistilBertForMaskedLM""",
"""FlaxDistilBertForMultipleChoice""",
"""FlaxDistilBertForQuestionAnswering""",
"""FlaxDistilBertForSequenceClassification""",
"""FlaxDistilBertForTokenClassification""",
"""FlaxDistilBertModel""",
"""FlaxDistilBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 670 | 1 |
from __future__ import annotations
from typing import TypedDict
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : str
__UpperCamelCase : int
def A__ ( lowerCamelCase ) -> list[str]:
if not isinstance(lowerCamelCase , lowerCamelCase ):
raise TypeError("""The parameter s type must be str.""" )
return [s[i:] + s[:i] for i in range(len(lowerCamelCase ) )]
def A__ ( lowerCamelCase ) -> BWTTransformDict:
if not isinstance(lowerCamelCase , lowerCamelCase ):
raise TypeError("""The parameter s type must be str.""" )
if not s:
raise ValueError("""The parameter s must not be empty.""" )
UpperCamelCase_: str = all_rotations(lowerCamelCase )
rotations.sort() # sort the list of rotations in alphabetically order
# make a string composed of the last char of each rotation
UpperCamelCase_: BWTTransformDict = {
"bwt_string": "".join([word[-1] for word in rotations] ),
"idx_original_string": rotations.index(lowerCamelCase ),
}
return response
def A__ ( lowerCamelCase , lowerCamelCase ) -> str:
if not isinstance(lowerCamelCase , lowerCamelCase ):
raise TypeError("""The parameter bwt_string type must be str.""" )
if not bwt_string:
raise ValueError("""The parameter bwt_string must not be empty.""" )
try:
UpperCamelCase_: int = int(lowerCamelCase )
except ValueError:
        raise TypeError(
            """The parameter idx_original_string type must be int or"""
            """ castable to int.""" )
if idx_original_string < 0:
raise ValueError("""The parameter idx_original_string must not be lower than 0.""" )
if idx_original_string >= len(lowerCamelCase ):
raise ValueError(
"""The parameter idx_original_string must be lower than""" """ len(bwt_string).""" )
UpperCamelCase_: List[Any] = [""""""] * len(lowerCamelCase )
for _ in range(len(lowerCamelCase ) ):
for i in range(len(lowerCamelCase ) ):
UpperCamelCase_: List[str] = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
lowerCamelCase_ : Any = """Provide a string that I will generate its BWT transform: """
lowerCamelCase_ : int = input(entry_msg).strip()
lowerCamelCase_ : Union[str, Any] = bwt_transform(s)
print(
F"""Burrows Wheeler transform for string '{s}' results """
F"""in '{result['bwt_string']}'"""
)
lowerCamelCase_ : str = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""])
print(
F"""Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' """
F"""we get original string '{original_string}'"""
)
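# Worked example: for s = "banana" the sorted rotations are
# ['abanan', 'anaban', 'ananab', 'banana', 'nabana', 'nanaba'], so
# bwt_transform("banana") == {"bwt_string": "nnbaaa", "idx_original_string": 3}
# and reverse_bwt("nnbaaa", 3) == "banana".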
| 670 |
from manim import *
class _UpperCamelCase ( _A ):
'''simple docstring'''
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: Dict = Rectangle(height=0.5 , width=0.5 )
UpperCamelCase_: Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCamelCase_: Tuple = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Tuple = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Union[str, Any] = VGroup(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Optional[Any] = Text("""CPU""" , font_size=24 )
UpperCamelCase_: int = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(snake_case_ )
UpperCamelCase_: Optional[int] = [mem.copy() for i in range(1 )]
UpperCamelCase_: Dict = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Optional[int] = Text("""GPU""" , font_size=24 )
UpperCamelCase_: Optional[int] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
gpu.align_to(snake_case_ , snake_case_ )
gpu.set_x(gpu.get_x() - 1 )
self.add(snake_case_ )
UpperCamelCase_: Dict = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Any = Text("""Model""" , font_size=24 )
UpperCamelCase_: Optional[Any] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
model.move_to([3, -1.0, 0] )
self.play(
Create(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) , )
UpperCamelCase_: List[Any] = MarkupText(
f'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' , font_size=24 , )
UpperCamelCase_: Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCamelCase_: Union[str, Any] = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case_ , run_time=2.5 ) , Write(snake_case_ ) , Write(snake_case_ ) )
self.add(snake_case_ )
UpperCamelCase_: Union[str, Any] = []
UpperCamelCase_: Union[str, Any] = []
UpperCamelCase_: Tuple = []
for i, rect in enumerate(snake_case_ ):
UpperCamelCase_: Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(snake_case_ , opacity=0.7 )
cpu_target.move_to(snake_case_ )
cpu_target.generate_target()
UpperCamelCase_: int = 0.46 / 4
UpperCamelCase_: Optional[int] = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=snake_case_ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=snake_case_ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=snake_case_ , buff=0.0 )
cpu_targs.append(snake_case_ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(snake_case_ ) )
second_animations.append(MoveToTarget(snake_case_ , run_time=1.5 ) )
self.play(*snake_case_ )
self.play(*snake_case_ )
self.wait()
| 670 | 1 |
def A__ ( lowerCamelCase ) -> list:
UpperCamelCase_: str = len(lowerCamelCase )
for _ in range(lowerCamelCase ):
for i in range(_ % 2 , arr_size - 1 , 2 ):
if arr[i + 1] < arr[i]:
UpperCamelCase_, UpperCamelCase_: Dict = arr[i + 1], arr[i]
return arr
if __name__ == "__main__":
lowerCamelCase_ : Any = list(range(10, 0, -1))
print(F"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
| 670 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Union[str, Any] = """laion/clap-htsat-unfused"""
UpperCamelCase_: List[str] = tempfile.mkdtemp()
def lowerCAmelCase__ ( self : Tuple , **snake_case_ : Optional[Any] ):
return RobertaTokenizer.from_pretrained(self.checkpoint , **snake_case_ )
def lowerCAmelCase__ ( self : str , **snake_case_ : Any ):
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **snake_case_ )
def lowerCAmelCase__ ( self : Tuple ):
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: Union[str, Any] = self.get_tokenizer()
UpperCamelCase_: int = self.get_feature_extractor()
UpperCamelCase_: List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_: Any = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: Any = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_: Optional[int] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
UpperCamelCase_: Dict = self.get_feature_extractor(do_normalize=snake_case_ , padding_value=1.0 )
UpperCamelCase_: List[str] = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=snake_case_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: int = self.get_feature_extractor()
UpperCamelCase_: Optional[Any] = self.get_tokenizer()
UpperCamelCase_: Dict = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
UpperCamelCase_: Optional[Any] = floats_list((3, 1000) )
UpperCamelCase_: List[str] = feature_extractor(snake_case_ , return_tensors="""np""" )
UpperCamelCase_: int = processor(audios=snake_case_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: List[Any] = self.get_feature_extractor()
UpperCamelCase_: List[str] = self.get_tokenizer()
UpperCamelCase_: List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
UpperCamelCase_: Dict = """This is a test string"""
UpperCamelCase_: Tuple = processor(text=snake_case_ )
UpperCamelCase_: Optional[int] = tokenizer(snake_case_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: List[str] = self.get_feature_extractor()
UpperCamelCase_: Any = self.get_tokenizer()
UpperCamelCase_: Optional[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
UpperCamelCase_: str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase_: Tuple = processor.batch_decode(snake_case_ )
UpperCamelCase_: str = tokenizer.batch_decode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Any = self.get_feature_extractor()
UpperCamelCase_: str = self.get_tokenizer()
UpperCamelCase_: Optional[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
| 670 | 1 |
import cva
import numpy as np
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Dict , snake_case_ : float , snake_case_ : int ):
if k in (0.04, 0.06):
UpperCamelCase_: Union[str, Any] = k
UpperCamelCase_: Union[str, Any] = window_size
else:
raise ValueError("""invalid k value""" )
def __str__( self : int ):
return str(self.k )
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : str ):
UpperCamelCase_: int = cva.imread(snake_case_ , 0 )
UpperCamelCase_, UpperCamelCase_: List[Any] = img.shape
UpperCamelCase_: list[list[int]] = []
UpperCamelCase_: int = img.copy()
UpperCamelCase_: Any = cva.cvtColor(snake_case_ , cva.COLOR_GRAY2RGB )
UpperCamelCase_, UpperCamelCase_: List[Any] = np.gradient(snake_case_ )
UpperCamelCase_: Optional[Any] = dx**2
UpperCamelCase_: Dict = dy**2
UpperCamelCase_: Optional[Any] = dx * dy
UpperCamelCase_: str = 0.04
UpperCamelCase_: int = self.window_size // 2
for y in range(snake_case_ , h - offset ):
for x in range(snake_case_ , w - offset ):
UpperCamelCase_: List[Any] = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCamelCase_: int = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCamelCase_: List[str] = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCamelCase_: List[str] = (wxx * wyy) - (wxy**2)
UpperCamelCase_: Optional[int] = wxx + wyy
UpperCamelCase_: Dict = det - k * (trace**2)
                # Corner-response threshold; 0.5 is arbitrary and can be tuned
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
if __name__ == "__main__":
lowerCamelCase_ : Optional[Any] = HarrisCorner(0.04, 3)
lowerCamelCase_ , lowerCamelCase_ : Any = edge_detect.detect("""path_to_image""")
cva.imwrite("""detect.png""", color_img)
| 670 |
import warnings
from ..trainer import Trainer
from ..utils import logging
lowerCamelCase_ : Dict = logging.get_logger(__name__)
class _UpperCamelCase ( _A ):
'''simple docstring'''
def __init__( self : List[str] , snake_case_ : Tuple=None , **snake_case_ : List[str] ):
warnings.warn(
"""`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
"""instead.""" , snake_case_ , )
super().__init__(args=snake_case_ , **snake_case_ )
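# Migration sketch (hedged): the shim only forwards to Trainer, so new code
# can construct Trainer directly with the standard arguments:
#
#   from transformers import Trainer, TrainingArguments
#
#   trainer = Trainer(model=model, args=TrainingArguments(output_dir="out"))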
| 670 | 1 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
lowerCamelCase_ : str = logging.get_logger(__name__)
lowerCamelCase_ : int = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCamelCase_ : List[Any] = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
lowerCamelCase_ : str = {
"""allenai/led-base-16384""": 1_63_84,
}
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : List[str] = VOCAB_FILES_NAMES
__UpperCamelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : Union[str, Any] = LEDTokenizer
__UpperCamelCase : str = ["""input_ids""", """attention_mask"""]
def __init__( self : List[str] , snake_case_ : Optional[Any]=None , snake_case_ : Union[str, Any]=None , snake_case_ : Optional[int]=None , snake_case_ : Any="replace" , snake_case_ : Tuple="<s>" , snake_case_ : List[Any]="</s>" , snake_case_ : str="</s>" , snake_case_ : List[Any]="<s>" , snake_case_ : Any="<unk>" , snake_case_ : Any="<pad>" , snake_case_ : Tuple="<mask>" , snake_case_ : Union[str, Any]=False , snake_case_ : str=True , **snake_case_ : Union[str, Any] , ):
super().__init__(
snake_case_ , snake_case_ , tokenizer_file=snake_case_ , errors=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , add_prefix_space=snake_case_ , trim_offsets=snake_case_ , **snake_case_ , )
UpperCamelCase_: Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , snake_case_ ) != add_prefix_space:
UpperCamelCase_: Union[str, Any] = getattr(snake_case_ , pre_tok_state.pop("""type""" ) )
UpperCamelCase_: Optional[Any] = add_prefix_space
UpperCamelCase_: Optional[int] = pre_tok_class(**snake_case_ )
UpperCamelCase_: Dict = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
UpperCamelCase_: Union[str, Any] = """post_processor"""
UpperCamelCase_: Tuple = getattr(self.backend_tokenizer , snake_case_ , snake_case_ )
if tokenizer_component_instance:
UpperCamelCase_: int = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
UpperCamelCase_: List[Any] = tuple(state["""sep"""] )
if "cls" in state:
UpperCamelCase_: List[str] = tuple(state["""cls"""] )
UpperCamelCase_: Optional[int] = False
if state.get("""add_prefix_space""" , snake_case_ ) != add_prefix_space:
UpperCamelCase_: str = add_prefix_space
UpperCamelCase_: int = True
if state.get("""trim_offsets""" , snake_case_ ) != trim_offsets:
UpperCamelCase_: Any = trim_offsets
UpperCamelCase_: int = True
if changes_to_apply:
UpperCamelCase_: List[Any] = getattr(snake_case_ , state.pop("""type""" ) )
UpperCamelCase_: int = component_class(**snake_case_ )
setattr(self.backend_tokenizer , snake_case_ , snake_case_ )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def lowerCAmelCase__ ( self : Tuple ):
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCAmelCase__ ( self : Dict , snake_case_ : int ):
UpperCamelCase_: List[str] = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else value
UpperCamelCase_: List[Any] = value
def lowerCAmelCase__ ( self : Optional[Any] , *snake_case_ : Optional[Any] , **snake_case_ : Union[str, Any] ):
UpperCamelCase_: List[str] = kwargs.get("""is_split_into_words""" , snake_case_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*snake_case_ , **snake_case_ )
def lowerCAmelCase__ ( self : Optional[int] , *snake_case_ : List[str] , **snake_case_ : Optional[int] ):
UpperCamelCase_: List[Any] = kwargs.get("""is_split_into_words""" , snake_case_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*snake_case_ , **snake_case_ )
def lowerCAmelCase__ ( self : Optional[int] , snake_case_ : str , snake_case_ : Optional[str] = None ):
UpperCamelCase_: Union[str, Any] = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
def lowerCAmelCase__ ( self : Tuple , snake_case_ : str , snake_case_ : List[str]=None ):
UpperCamelCase_: List[str] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
UpperCamelCase_: int = [self.sep_token_id]
UpperCamelCase_: List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase__ ( self : str , snake_case_ : Union[Dict[str, EncodedInput], BatchEncoding] , snake_case_ : Optional[int] = None , snake_case_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , snake_case_ : Optional[int] = None , snake_case_ : Optional[bool] = None , ):
UpperCamelCase_: Tuple = super()._pad(
encoded_inputs=snake_case_ , max_length=snake_case_ , padding_strategy=snake_case_ , pad_to_multiple_of=snake_case_ , return_attention_mask=snake_case_ , )
# Load from model defaults
if return_attention_mask is None:
UpperCamelCase_: Any = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
UpperCamelCase_: List[str] = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as the other (sequential) inputs.
UpperCamelCase_: List[str] = len(encoded_inputs["""global_attention_mask"""] ) != len(snake_case_ )
if needs_to_be_padded:
UpperCamelCase_: str = len(snake_case_ ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
UpperCamelCase_: List[Any] = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
UpperCamelCase_: Tuple = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs
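# A minimal, self-contained sketch of the `global_attention_mask` padding rule
# implemented in `_pad` above: `-1` marks padded positions because `0` already
# means "local attention" in this mask. The helper name is illustrative, not
# part of the class above.
def _pad_global_attention_mask(mask, target_length, padding_side="right"):
    # Pad with -1 so padded positions are never confused with local attention (0).
    difference = target_length - len(mask)
    if difference <= 0:
        return mask
    if padding_side == "right":
        return mask + [-1] * difference
    return [-1] * difference + mask

assert _pad_global_attention_mask([1, 0, 0], 5) == [1, 0, 0, -1, -1]
assert _pad_global_attention_mask([1, 0], 4, padding_side="left") == [-1, -1, 1, 0]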
| 670 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
lowerCamelCase_ : Optional[int] = logging.get_logger("""transformers.models.speecht5""")
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[Any]:
hf_model.apply_weight_norm()
UpperCamelCase_: Union[str, Any] = checkpoint["""input_conv.weight_g"""]
UpperCamelCase_: Optional[int] = checkpoint["""input_conv.weight_v"""]
UpperCamelCase_: List[Any] = checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
UpperCamelCase_: List[str] = checkpoint[F'''upsamples.{i}.1.weight_g''']
UpperCamelCase_: Dict = checkpoint[F'''upsamples.{i}.1.weight_v''']
UpperCamelCase_: List[str] = checkpoint[F'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
UpperCamelCase_: Tuple = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g''']
UpperCamelCase_: Any = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v''']
UpperCamelCase_: Tuple = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias''']
UpperCamelCase_: Union[str, Any] = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g''']
UpperCamelCase_: Any = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v''']
UpperCamelCase_: int = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias''']
UpperCamelCase_: int = checkpoint["""output_conv.1.weight_g"""]
UpperCamelCase_: Tuple = checkpoint["""output_conv.1.weight_v"""]
UpperCamelCase_: List[str] = checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , ) -> Optional[int]:
if config_path is not None:
UpperCamelCase_: Union[str, Any] = SpeechTaHifiGanConfig.from_pretrained(lowerCamelCase )
else:
UpperCamelCase_: str = SpeechTaHifiGanConfig()
UpperCamelCase_: Union[str, Any] = SpeechTaHifiGan(lowerCamelCase )
UpperCamelCase_: str = torch.load(lowerCamelCase )
load_weights(orig_checkpoint["""model"""]["""generator"""] , lowerCamelCase , lowerCamelCase )
UpperCamelCase_: Union[str, Any] = np.load(lowerCamelCase )
UpperCamelCase_: int = stats[0].reshape(-1 )
UpperCamelCase_: Union[str, Any] = stats[1].reshape(-1 )
UpperCamelCase_: Dict = torch.from_numpy(lowerCamelCase ).float()
UpperCamelCase_: Optional[Any] = torch.from_numpy(lowerCamelCase ).float()
model.save_pretrained(lowerCamelCase )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(lowerCamelCase )
if __name__ == "__main__":
lowerCamelCase_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
lowerCamelCase_ : Optional[int] = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
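    # Hypothetical invocation of the script above (assuming it is saved as
    # convert_hifigan_checkpoint.py); all paths and repo ids are placeholders:
    #
    #   python convert_hifigan_checkpoint.py \
    #       --checkpoint_path /path/to/generator.ckpt \
    #       --stats_path /path/to/stats.npy \
    #       --pytorch_dump_folder_path ./hifigan-converted \
    #       --push_to_hub my-username/my-hifigan  # optional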
| 670 | 1 |
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
lowerCamelCase_ : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase_ : Union[str, Any] = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
lowerCamelCase_ : Optional[int] = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
lowerCamelCase_ : int = {
"""facebook/blenderbot_small-90M""": 5_12,
}
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : int = VOCAB_FILES_NAMES
__UpperCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : str = BlenderbotSmallTokenizer
def __init__( self : Any , snake_case_ : Optional[int]=None , snake_case_ : Union[str, Any]=None , snake_case_ : Dict="<|endoftext|>" , snake_case_ : Optional[int]="<|endoftext|>" , snake_case_ : Dict="<|endoftext|>" , snake_case_ : Tuple=False , snake_case_ : Optional[Any]=True , **snake_case_ : str , ):
super().__init__(
ByteLevelBPETokenizer(
vocab=snake_case_ , merges=snake_case_ , add_prefix_space=snake_case_ , trim_offsets=snake_case_ , ) , bos_token=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , **snake_case_ , )
UpperCamelCase_: str = add_prefix_space
def lowerCAmelCase__ ( self : str , snake_case_ : List[str] , snake_case_ : str=None ):
UpperCamelCase_: Dict = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCAmelCase__ ( self : Union[str, Any] , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
UpperCamelCase_: Optional[Any] = [self.sep_token_id]
UpperCamelCase_: List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
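# A minimal sketch of the special-token layout produced by the two methods
# above, with made-up token ids (bos=0, eos=2; real ids depend on the vocab):
_bos, _eos = 0, 2
_single = [_bos] + [10, 11] + [_eos]               # <s> tokens </s>
_pair = _single + [_eos] + [20, 21] + [_eos]       # <s> a </s> </s> b </s>
assert _pair == [0, 10, 11, 2, 2, 20, 21, 2]
# `create_token_type_ids_from_sequences` above returns all zeros for either layout.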
| 670 |
lowerCamelCase_ : Optional[Any] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
lowerCamelCase_ : Union[str, Any] = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCamelCase_ : Optional[Any] = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
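# Illustrative only: how a consumer of the replacement table above might apply
# it to a notebook cell. The helper and the inline table are stand-ins (the
# module-level names in this file are reused, so they cannot be referenced):
def _apply_replacements(text, table):
    for placeholder, value in table.items():
        text = text.replace(placeholder, value)
    return text

assert _apply_replacements(
    "from transformers import {model_class}",
    {"{model_class}": "FakeModelClass"},
) == "from transformers import FakeModelClass"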
| 670 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCamelCase_ : Tuple = logging.get_logger(__name__)
lowerCamelCase_ : Dict = {
"""speechbrain/m-ctc-t-large""": """https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json""",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : str = """mctct"""
def __init__( self : Dict , snake_case_ : str=8065 , snake_case_ : int=1536 , snake_case_ : Dict=36 , snake_case_ : Optional[int]=6144 , snake_case_ : Any=4 , snake_case_ : Tuple=384 , snake_case_ : Optional[Any]=920 , snake_case_ : Optional[Any]=1e-5 , snake_case_ : Optional[int]=0.3 , snake_case_ : int="relu" , snake_case_ : int=0.02 , snake_case_ : str=0.3 , snake_case_ : Dict=0.3 , snake_case_ : int=1 , snake_case_ : List[str]=0 , snake_case_ : Tuple=2 , snake_case_ : List[str]=1 , snake_case_ : Tuple=0.3 , snake_case_ : List[Any]=1 , snake_case_ : Optional[Any]=(7,) , snake_case_ : Optional[int]=(3,) , snake_case_ : Union[str, Any]=80 , snake_case_ : Dict=1 , snake_case_ : Dict=None , snake_case_ : int="sum" , snake_case_ : Union[str, Any]=False , **snake_case_ : List[str] , ):
super().__init__(**snake_case_ , pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ )
UpperCamelCase_: str = vocab_size
UpperCamelCase_: Optional[int] = hidden_size
UpperCamelCase_: int = num_hidden_layers
UpperCamelCase_: Any = intermediate_size
UpperCamelCase_: Dict = num_attention_heads
UpperCamelCase_: Dict = attention_head_dim
UpperCamelCase_: Optional[int] = max_position_embeddings
UpperCamelCase_: Optional[Any] = layer_norm_eps
UpperCamelCase_: Any = layerdrop
UpperCamelCase_: List[Any] = hidden_act
UpperCamelCase_: Tuple = initializer_range
UpperCamelCase_: int = hidden_dropout_prob
UpperCamelCase_: List[str] = attention_probs_dropout_prob
UpperCamelCase_: Tuple = pad_token_id
UpperCamelCase_: List[str] = bos_token_id
UpperCamelCase_: Tuple = eos_token_id
UpperCamelCase_: str = conv_glu_dim
UpperCamelCase_: Optional[Any] = conv_dropout
UpperCamelCase_: int = num_conv_layers
UpperCamelCase_: Optional[Any] = input_feat_per_channel
UpperCamelCase_: str = input_channels
UpperCamelCase_: Any = conv_channels
UpperCamelCase_: List[Any] = ctc_loss_reduction
UpperCamelCase_: List[str] = ctc_zero_infinity
        # prevents config testing from failing when exporting to JSON
UpperCamelCase_: Optional[Any] = list(snake_case_ )
UpperCamelCase_: Optional[int] = list(snake_case_ )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel)` == `config.num_conv_layers` """
f'''but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, '''
f'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
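# A standalone sketch of the kernel/layer-count validation performed above; it
# does not depend on the config class in this file:
def _check_conv_config(conv_kernel, num_conv_layers):
    if len(conv_kernel) != num_conv_layers:
        raise ValueError(
            f"`len(conv_kernel) = {len(conv_kernel)}` must equal "
            f"`num_conv_layers = {num_conv_layers}`."
        )

_check_conv_config((7,), 1)        # matches the defaults above -> ok
try:
    _check_conv_config((7, 3), 1)  # mismatched lengths -> raises
except ValueError:
    pass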
| 670 |
import cva
import numpy as np
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Dict , snake_case_ : float , snake_case_ : int ):
if k in (0.04, 0.06):
UpperCamelCase_: Union[str, Any] = k
UpperCamelCase_: Union[str, Any] = window_size
else:
raise ValueError("""invalid k value""" )
def __str__( self : int ):
return str(self.k )
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : str ):
UpperCamelCase_: int = cva.imread(snake_case_ , 0 )
UpperCamelCase_, UpperCamelCase_: List[Any] = img.shape
UpperCamelCase_: list[list[int]] = []
UpperCamelCase_: int = img.copy()
UpperCamelCase_: Any = cva.cvtColor(snake_case_ , cva.COLOR_GRAY2RGB )
UpperCamelCase_, UpperCamelCase_: List[Any] = np.gradient(snake_case_ )
UpperCamelCase_: Optional[Any] = dx**2
UpperCamelCase_: Dict = dy**2
UpperCamelCase_: Optional[Any] = dx * dy
UpperCamelCase_: str = 0.04
UpperCamelCase_: int = self.window_size // 2
for y in range(snake_case_ , h - offset ):
for x in range(snake_case_ , w - offset ):
UpperCamelCase_: List[Any] = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCamelCase_: int = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCamelCase_: List[str] = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
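                # Harris response: R = det(M) - k * trace(M)^2, where M is the
                # 2x2 structure tensor summed over the window. Large positive R
                # indicates a corner, negative R an edge, |R| near 0 a flat patch.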
UpperCamelCase_: List[str] = (wxx * wyy) - (wxy**2)
UpperCamelCase_: Optional[int] = wxx + wyy
UpperCamelCase_: Dict = det - k * (trace**2)
                # The response threshold (0.5) below can be tuned to accept more or fewer corners
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
if __name__ == "__main__":
lowerCamelCase_ : Optional[Any] = HarrisCorner(0.04, 3)
lowerCamelCase_ , lowerCamelCase_ : Any = edge_detect.detect("""path_to_image""")
cva.imwrite("""detect.png""", color_img)
| 670 | 1 |
from ...processing_utils import ProcessorMixin
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : Dict = ["""image_processor""", """feature_extractor"""]
__UpperCamelCase : Optional[int] = """TvltImageProcessor"""
__UpperCamelCase : Tuple = """TvltFeatureExtractor"""
def __init__( self : int , snake_case_ : str , snake_case_ : Dict ):
super().__init__(image_processor=snake_case_ , feature_extractor=snake_case_ )
UpperCamelCase_: Tuple = image_processor
UpperCamelCase_: Union[str, Any] = feature_extractor
def __call__( self : Optional[Any] , snake_case_ : List[Any]=None , snake_case_ : Any=None , snake_case_ : str=None , snake_case_ : Optional[Any]=None , snake_case_ : Dict=False , snake_case_ : List[str]=False , *snake_case_ : Optional[int] , **snake_case_ : Dict , ):
if images is None and audio is None:
raise ValueError("""You need to specify either an `images` or `audio` input to process.""" )
UpperCamelCase_: int = None
if images is not None:
UpperCamelCase_: str = self.image_processor(snake_case_ , mask_pixel=snake_case_ , *snake_case_ , **snake_case_ )
if images_mixed is not None:
UpperCamelCase_: Dict = self.image_processor(snake_case_ , is_mixed=snake_case_ , *snake_case_ , **snake_case_ )
if audio is not None:
UpperCamelCase_: Optional[Any] = self.feature_extractor(
snake_case_ , *snake_case_ , sampling_rate=snake_case_ , mask_audio=snake_case_ , **snake_case_ )
UpperCamelCase_: List[str] = {}
if audio is not None:
output_dict.update(snake_case_ )
if images is not None:
output_dict.update(snake_case_ )
if images_mixed_dict is not None:
output_dict.update(snake_case_ )
return output_dict
@property
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Optional[int] = self.image_processor.model_input_names
UpperCamelCase_: Dict = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
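# A plain-Python sketch of the merge behaviour in `__call__` above: each
# modality contributes its own keys and later `update` calls win on collisions
# (the key names below are illustrative, not the exact ones the processors emit):
_output = {}
_output.update({"audio_values": [0.0]})
_output.update({"pixel_values": [[0]]})
_output.update({"pixel_values_mixed": [[1]]})
assert set(_output) == {"audio_values", "pixel_values", "pixel_values_mixed"}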
| 670 |
import random
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase = False ) -> dict:
UpperCamelCase_: dict = {i: [] for i in range(lowerCamelCase )}
    # if probability is greater than or equal to 1, then generate a complete graph
if probability >= 1:
return complete_graph(lowerCamelCase )
    # if probability is less than or equal to 0, then return a graph without edges
if probability <= 0:
return graph
    # for each pair of nodes (i, j), add an edge from i to j
    # if the randomly generated number is lower than the given probability
for i in range(lowerCamelCase ):
for j in range(i + 1 , lowerCamelCase ):
if random.random() < probability:
graph[i].append(lowerCamelCase )
if not directed:
                    # if the graph is undirected, also add the reverse edge from j to i
graph[j].append(lowerCamelCase )
return graph
def A__ ( lowerCamelCase ) -> dict:
return {
i: [j for j in range(lowerCamelCase ) if i != j] for i in range(lowerCamelCase )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
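# Note: both functions above are bound to the same (mangled) name `A__`, so
# only the complete-graph helper is reachable here; a quick sanity check of it:
assert A__(3) == {0: [1, 2], 1: [0, 2], 2: [0, 1]}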
| 670 | 1 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
lowerCamelCase_ : Optional[Any] = logging.get_logger(__name__)
# General docstring
lowerCamelCase_ : Tuple = """RegNetConfig"""
# Base docstring
lowerCamelCase_ : List[Any] = """facebook/regnet-y-040"""
lowerCamelCase_ : Any = [1, 10_88, 7, 7]
# Image classification docstring
lowerCamelCase_ : Optional[Any] = """facebook/regnet-y-040"""
lowerCamelCase_ : List[str] = """tabby, tabby cat"""
lowerCamelCase_ : Any = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class _UpperCamelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Any , snake_case_ : int , snake_case_ : int = 3 , snake_case_ : int = 1 , snake_case_ : int = 1 , snake_case_ : Optional[str] = "relu" , **snake_case_ : List[Any] , ):
super().__init__(**snake_case_ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
UpperCamelCase_: int = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
UpperCamelCase_: Optional[Any] = tf.keras.layers.ConvaD(
filters=snake_case_ , kernel_size=snake_case_ , strides=snake_case_ , padding="""VALID""" , groups=snake_case_ , use_bias=snake_case_ , name="""convolution""" , )
UpperCamelCase_: List[str] = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="""normalization""" )
UpperCamelCase_: Dict = ACTaFN[activation] if activation is not None else tf.identity
def lowerCAmelCase__ ( self : str , snake_case_ : Union[str, Any] ):
UpperCamelCase_: Dict = self.convolution(self.padding(snake_case_ ) )
UpperCamelCase_: int = self.normalization(snake_case_ )
UpperCamelCase_: int = self.activation(snake_case_ )
return hidden_state
class _UpperCamelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Union[str, Any] , snake_case_ : RegNetConfig , **snake_case_ : Any ):
super().__init__(**snake_case_ )
UpperCamelCase_: Union[str, Any] = config.num_channels
UpperCamelCase_: List[Any] = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="""embedder""" , )
def lowerCAmelCase__ ( self : Union[str, Any] , snake_case_ : str ):
UpperCamelCase_: str = shape_list(snake_case_ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
UpperCamelCase_: str = tf.transpose(snake_case_ , perm=(0, 2, 3, 1) )
UpperCamelCase_: str = self.embedder(snake_case_ )
return hidden_state
class _UpperCamelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Union[str, Any] , snake_case_ : int , snake_case_ : int = 2 , **snake_case_ : Optional[Any] ):
super().__init__(**snake_case_ )
UpperCamelCase_: str = tf.keras.layers.ConvaD(
filters=snake_case_ , kernel_size=1 , strides=snake_case_ , use_bias=snake_case_ , name="""convolution""" )
UpperCamelCase_: int = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="""normalization""" )
def lowerCAmelCase__ ( self : Any , snake_case_ : tf.Tensor , snake_case_ : bool = False ):
return self.normalization(self.convolution(snake_case_ ) , training=snake_case_ )
class _UpperCamelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Tuple , snake_case_ : int , snake_case_ : int , **snake_case_ : List[str] ):
super().__init__(**snake_case_ )
UpperCamelCase_: str = tf.keras.layers.GlobalAveragePoolingaD(keepdims=snake_case_ , name="""pooler""" )
UpperCamelCase_: List[Any] = [
tf.keras.layers.ConvaD(filters=snake_case_ , kernel_size=1 , activation="""relu""" , name="""attention.0""" ),
tf.keras.layers.ConvaD(filters=snake_case_ , kernel_size=1 , activation="""sigmoid""" , name="""attention.2""" ),
]
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : Dict ):
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
UpperCamelCase_: List[str] = self.pooler(snake_case_ )
for layer_module in self.attention:
UpperCamelCase_: Union[str, Any] = layer_module(snake_case_ )
UpperCamelCase_: Dict = hidden_state * pooled
return hidden_state
class _UpperCamelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : str , snake_case_ : RegNetConfig , snake_case_ : int , snake_case_ : int , snake_case_ : int = 1 , **snake_case_ : Dict ):
super().__init__(**snake_case_ )
UpperCamelCase_: List[str] = in_channels != out_channels or stride != 1
UpperCamelCase_: Tuple = max(1 , out_channels // config.groups_width )
UpperCamelCase_: Tuple = (
TFRegNetShortCut(snake_case_ , stride=snake_case_ , name="""shortcut""" )
if should_apply_shortcut
else tf.keras.layers.Activation("""linear""" , name="""shortcut""" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
UpperCamelCase_: Any = [
TFRegNetConvLayer(snake_case_ , kernel_size=1 , activation=config.hidden_act , name="""layer.0""" ),
TFRegNetConvLayer(
snake_case_ , stride=snake_case_ , groups=snake_case_ , activation=config.hidden_act , name="""layer.1""" ),
TFRegNetConvLayer(snake_case_ , kernel_size=1 , activation=snake_case_ , name="""layer.2""" ),
]
UpperCamelCase_: Union[str, Any] = ACTaFN[config.hidden_act]
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : Tuple ):
UpperCamelCase_: Optional[Any] = hidden_state
for layer_module in self.layers:
UpperCamelCase_: Union[str, Any] = layer_module(snake_case_ )
UpperCamelCase_: Optional[int] = self.shortcut(snake_case_ )
hidden_state += residual
UpperCamelCase_: List[str] = self.activation(snake_case_ )
return hidden_state
class _UpperCamelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Optional[Any] , snake_case_ : RegNetConfig , snake_case_ : int , snake_case_ : int , snake_case_ : int = 1 , **snake_case_ : List[Any] ):
super().__init__(**snake_case_ )
UpperCamelCase_: Union[str, Any] = in_channels != out_channels or stride != 1
UpperCamelCase_: str = max(1 , out_channels // config.groups_width )
UpperCamelCase_: List[str] = (
TFRegNetShortCut(snake_case_ , stride=snake_case_ , name="""shortcut""" )
if should_apply_shortcut
else tf.keras.layers.Activation("""linear""" , name="""shortcut""" )
)
UpperCamelCase_: str = [
TFRegNetConvLayer(snake_case_ , kernel_size=1 , activation=config.hidden_act , name="""layer.0""" ),
TFRegNetConvLayer(
snake_case_ , stride=snake_case_ , groups=snake_case_ , activation=config.hidden_act , name="""layer.1""" ),
TFRegNetSELayer(snake_case_ , reduced_channels=int(round(in_channels / 4 ) ) , name="""layer.2""" ),
TFRegNetConvLayer(snake_case_ , kernel_size=1 , activation=snake_case_ , name="""layer.3""" ),
]
UpperCamelCase_: List[str] = ACTaFN[config.hidden_act]
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : int ):
UpperCamelCase_: Union[str, Any] = hidden_state
for layer_module in self.layers:
UpperCamelCase_: str = layer_module(snake_case_ )
UpperCamelCase_: Optional[int] = self.shortcut(snake_case_ )
hidden_state += residual
UpperCamelCase_: Optional[int] = self.activation(snake_case_ )
return hidden_state
class _UpperCamelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : List[str] , snake_case_ : RegNetConfig , snake_case_ : int , snake_case_ : int , snake_case_ : int = 2 , snake_case_ : int = 2 , **snake_case_ : Any ):
super().__init__(**snake_case_ )
UpperCamelCase_: Dict = TFRegNetXLayer if config.layer_type == """x""" else TFRegNetYLayer
UpperCamelCase_: str = [
# downsampling is done in the first layer with stride of 2
layer(snake_case_ , snake_case_ , snake_case_ , stride=snake_case_ , name="""layers.0""" ),
*[layer(snake_case_ , snake_case_ , snake_case_ , name=f'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : List[Any] ):
for layer_module in self.layers:
UpperCamelCase_: Any = layer_module(snake_case_ )
return hidden_state
class _UpperCamelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Tuple , snake_case_ : RegNetConfig , **snake_case_ : Optional[int] ):
super().__init__(**snake_case_ )
UpperCamelCase_: Any = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
snake_case_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="""stages.0""" , ) )
UpperCamelCase_: Union[str, Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(snake_case_ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(snake_case_ , snake_case_ , snake_case_ , depth=snake_case_ , name=f'''stages.{i+1}''' ) )
def lowerCAmelCase__ ( self : List[str] , snake_case_ : tf.Tensor , snake_case_ : bool = False , snake_case_ : bool = True ):
UpperCamelCase_: int = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
UpperCamelCase_: str = hidden_states + (hidden_state,)
UpperCamelCase_: List[Any] = stage_module(snake_case_ )
if output_hidden_states:
UpperCamelCase_: Union[str, Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=snake_case_ , hidden_states=snake_case_ )
@keras_serializable
class _UpperCamelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
__UpperCamelCase : Any = RegNetConfig
def __init__( self : Dict , snake_case_ : Union[str, Any] , **snake_case_ : Tuple ):
super().__init__(**snake_case_ )
UpperCamelCase_: Optional[int] = config
UpperCamelCase_: Tuple = TFRegNetEmbeddings(snake_case_ , name="""embedder""" )
UpperCamelCase_: Union[str, Any] = TFRegNetEncoder(snake_case_ , name="""encoder""" )
UpperCamelCase_: Any = tf.keras.layers.GlobalAveragePoolingaD(keepdims=snake_case_ , name="""pooler""" )
@unpack_inputs
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : tf.Tensor , snake_case_ : Optional[bool] = None , snake_case_ : Optional[bool] = None , snake_case_ : bool = False , ):
UpperCamelCase_: Optional[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase_: str = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase_: str = self.embedder(snake_case_ , training=snake_case_ )
UpperCamelCase_: List[str] = self.encoder(
snake_case_ , output_hidden_states=snake_case_ , return_dict=snake_case_ , training=snake_case_ )
UpperCamelCase_: Optional[int] = encoder_outputs[0]
UpperCamelCase_: Tuple = self.pooler(snake_case_ )
        # Change to NCHW output format to keep uniformity across the modules
UpperCamelCase_: Union[str, Any] = tf.transpose(snake_case_ , perm=(0, 3, 1, 2) )
UpperCamelCase_: Any = tf.transpose(snake_case_ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
UpperCamelCase_: List[Any] = tuple([tf.transpose(snake_case_ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=snake_case_ , pooler_output=snake_case_ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : Any = RegNetConfig
__UpperCamelCase : Union[str, Any] = """regnet"""
__UpperCamelCase : str = """pixel_values"""
@property
def lowerCAmelCase__ ( self : Optional[Any] ):
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
lowerCamelCase_ : Tuple = r"""
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.
    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
lowerCamelCase_ : Tuple = r"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"""The bare RegNet model outputting raw features without any specific head on top.""" , _A , )
class _UpperCamelCase ( _A ):
'''simple docstring'''
def __init__( self : Tuple , snake_case_ : RegNetConfig , *snake_case_ : int , **snake_case_ : Union[str, Any] ):
super().__init__(snake_case_ , *snake_case_ , **snake_case_ )
UpperCamelCase_: int = TFRegNetMainLayer(snake_case_ , name="""regnet""" )
@unpack_inputs
@add_start_docstrings_to_model_forward(snake_case_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=snake_case_ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCAmelCase__ ( self : Tuple , snake_case_ : tf.Tensor , snake_case_ : Optional[bool] = None , snake_case_ : Optional[bool] = None , snake_case_ : List[str]=False , ):
UpperCamelCase_: Union[str, Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase_: Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase_: Union[str, Any] = self.regnet(
pixel_values=snake_case_ , output_hidden_states=snake_case_ , return_dict=snake_case_ , training=snake_case_ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"""
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" , _A , )
class _UpperCamelCase ( _A , _A ):
'''simple docstring'''
def __init__( self : Dict , snake_case_ : RegNetConfig , *snake_case_ : Tuple , **snake_case_ : Union[str, Any] ):
super().__init__(snake_case_ , *snake_case_ , **snake_case_ )
UpperCamelCase_: Union[str, Any] = config.num_labels
UpperCamelCase_: Optional[Any] = TFRegNetMainLayer(snake_case_ , name="""regnet""" )
# classification head
UpperCamelCase_: Tuple = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="""classifier.1""" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(snake_case_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=snake_case_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : tf.Tensor = None , snake_case_ : tf.Tensor = None , snake_case_ : bool = None , snake_case_ : bool = None , snake_case_ : Union[str, Any]=False , ):
UpperCamelCase_: int = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase_: Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase_: List[str] = self.regnet(
snake_case_ , output_hidden_states=snake_case_ , return_dict=snake_case_ , training=snake_case_ )
UpperCamelCase_: Optional[Any] = outputs.pooler_output if return_dict else outputs[1]
UpperCamelCase_: List[Any] = self.classifier[0](snake_case_ )
UpperCamelCase_: Dict = self.classifier[1](snake_case_ )
UpperCamelCase_: List[Any] = None if labels is None else self.hf_compute_loss(labels=snake_case_ , logits=snake_case_ )
if not return_dict:
UpperCamelCase_: Tuple = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=snake_case_ , logits=snake_case_ , hidden_states=outputs.hidden_states )
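# A minimal sketch of the NCHW <-> NHWC transposes used throughout the model
# above (Keras Conv2D on CPU expects channels-last, while the HF API is
# channels-first); `tf` is the tensorflow module imported at the top of this file:
_x_nchw = tf.zeros((2, 3, 224, 224))                 # (batch, C, H, W)
_x_nhwc = tf.transpose(_x_nchw, perm=(0, 2, 3, 1))   # (batch, H, W, C)
assert _x_nhwc.shape == (2, 224, 224, 3)
_x_back = tf.transpose(_x_nhwc, perm=(0, 3, 1, 2))   # restore channels-first
assert _x_back.shape == (2, 3, 224, 224)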
| 670 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: Optional[int] = logging.get_logger()
# the current default level is logging.WARNING
UpperCamelCase_: Dict = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(snake_case_ )
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: Union[str, Any] = logging.get_verbosity()
UpperCamelCase_: int = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
UpperCamelCase_: Union[str, Any] = """Testing 1, 2, 3"""
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(snake_case_ ) as cl:
logger.warning(snake_case_ )
self.assertEqual(cl.out , msg + """\n""" )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(snake_case_ ) as cl:
logger.warning(snake_case_ )
self.assertEqual(cl.out , """""" )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(snake_case_ ) as cl:
logger.warning(snake_case_ )
self.assertEqual(cl.out , msg + """\n""" )
# restore to the original level
logging.set_verbosity(snake_case_ )
@mockenv(TRANSFORMERS_VERBOSITY="""error""" )
def lowerCAmelCase__ ( self : Optional[int] ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
UpperCamelCase_: List[str] = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
UpperCamelCase_: str = os.getenv("""TRANSFORMERS_VERBOSITY""" , snake_case_ )
UpperCamelCase_: Any = logging.log_levels[env_level_str]
UpperCamelCase_: Dict = logging.get_verbosity()
self.assertEqual(
snake_case_ , snake_case_ , f'''TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}''' , )
# restore to the original level
UpperCamelCase_: str = """"""
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="""super-error""" )
def lowerCAmelCase__ ( self : List[Any] ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
UpperCamelCase_: str = logging.logging.getLogger()
with CaptureLogger(snake_case_ ) as cl:
# this action activates the env var
logging.get_logger("""transformers.models.bart.tokenization_bart""" )
self.assertIn("""Unknown option TRANSFORMERS_VERBOSITY=super-error""" , cl.out )
# no need to restore as nothing was changed
def lowerCAmelCase__ ( self : List[Any] ):
# testing `logger.warning_advice()`
transformers.utils.logging._reset_library_root_logger()
UpperCamelCase_: List[str] = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
UpperCamelCase_: Any = """Testing 1, 2, 3"""
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""1""" ):
# nothing should be logged as env var disables this method
with CaptureLogger(snake_case_ ) as cl:
logger.warning_advice(snake_case_ )
self.assertEqual(cl.out , """""" )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""""" ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(snake_case_ ) as cl:
logger.warning_advice(snake_case_ )
self.assertEqual(cl.out , msg + """\n""" )
def A__ ( ) -> Union[str, Any]:
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
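# A short usage sketch of the verbosity API exercised above; `logging` is the
# `transformers.logging` module imported at the top of this file:
logging.set_verbosity_error()
assert logging.get_verbosity() == logging.log_levels["error"]
logging.set_verbosity_warning()  # restore the library default
assert logging.get_verbosity() == logging.log_levels["warning"]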
| 670 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: List[str] = tempfile.mkdtemp()
UpperCamelCase_: List[Any] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
UpperCamelCase_: Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
UpperCamelCase_: Any = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.4814_5466, 0.457_8275, 0.4082_1073],
"""image_std""": [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
UpperCamelCase_: Tuple = os.path.join(self.tmpdirname , snake_case_ )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : List[Any] , **snake_case_ : Optional[Any] ):
return BertTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def lowerCAmelCase__ ( self : Any , **snake_case_ : List[Any] ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **snake_case_ )
def lowerCAmelCase__ ( self : Tuple , **snake_case_ : Any ):
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **snake_case_ )
def lowerCAmelCase__ ( self : Dict ):
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: Optional[int] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
UpperCamelCase_: Tuple = [Image.fromarray(np.moveaxis(snake_case_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: Union[str, Any] = self.get_tokenizer()
UpperCamelCase_: Tuple = self.get_rust_tokenizer()
UpperCamelCase_: Any = self.get_image_processor()
UpperCamelCase_: Union[str, Any] = AlignProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
processor_slow.save_pretrained(self.tmpdirname )
UpperCamelCase_: Optional[Any] = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=snake_case_ )
UpperCamelCase_: Optional[Any] = AlignProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
processor_fast.save_pretrained(self.tmpdirname )
UpperCamelCase_: Any = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , snake_case_ )
self.assertIsInstance(processor_fast.tokenizer , snake_case_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , snake_case_ )
self.assertIsInstance(processor_fast.image_processor , snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: Tuple = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_: int = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
UpperCamelCase_: int = self.get_image_processor(do_normalize=snake_case_ , padding_value=1.0 )
UpperCamelCase_: Dict = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=snake_case_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case_ )
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: Optional[int] = self.get_image_processor()
UpperCamelCase_: Optional[int] = self.get_tokenizer()
UpperCamelCase_: Tuple = AlignProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
UpperCamelCase_: str = self.prepare_image_inputs()
UpperCamelCase_: int = image_processor(snake_case_ , return_tensors="""np""" )
UpperCamelCase_: List[Any] = processor(images=snake_case_ , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: str = self.get_image_processor()
UpperCamelCase_: str = self.get_tokenizer()
UpperCamelCase_: Dict = AlignProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
UpperCamelCase_: int = """lower newer"""
UpperCamelCase_: Optional[int] = processor(text=snake_case_ )
UpperCamelCase_: Any = tokenizer(snake_case_ , padding="""max_length""" , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: List[Any] = self.get_image_processor()
UpperCamelCase_: List[Any] = self.get_tokenizer()
UpperCamelCase_: Any = AlignProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
UpperCamelCase_: Tuple = """lower newer"""
UpperCamelCase_: Optional[int] = self.prepare_image_inputs()
UpperCamelCase_: Union[str, Any] = processor(text=snake_case_ , images=snake_case_ )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(snake_case_ ):
processor()
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: Dict = self.get_image_processor()
UpperCamelCase_: str = self.get_tokenizer()
UpperCamelCase_: Any = AlignProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
UpperCamelCase_: int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase_: Dict = processor.batch_decode(snake_case_ )
UpperCamelCase_: Tuple = tokenizer.batch_decode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: int = self.get_image_processor()
UpperCamelCase_: int = self.get_tokenizer()
UpperCamelCase_: Any = AlignProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
UpperCamelCase_: List[Any] = """lower newer"""
UpperCamelCase_: Dict = self.prepare_image_inputs()
UpperCamelCase_: Optional[Any] = processor(text=snake_case_ , images=snake_case_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 670 |
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
lowerCamelCase_ : Optional[int] = HUGGINGFACE_HUB_CACHE
lowerCamelCase_ : List[str] = """config.json"""
lowerCamelCase_ : Any = """diffusion_pytorch_model.bin"""
lowerCamelCase_ : Union[str, Any] = """diffusion_flax_model.msgpack"""
lowerCamelCase_ : Dict = """model.onnx"""
lowerCamelCase_ : List[Any] = """diffusion_pytorch_model.safetensors"""
lowerCamelCase_ : Optional[Any] = """weights.pb"""
lowerCamelCase_ : Optional[Any] = """https://huggingface.co"""
lowerCamelCase_ : Union[str, Any] = default_cache_path
lowerCamelCase_ : Tuple = """diffusers_modules"""
lowerCamelCase_ : Optional[Any] = os.getenv("""HF_MODULES_CACHE""", os.path.join(hf_cache_home, """modules"""))
lowerCamelCase_ : str = ["""fp16""", """non-ema"""]
lowerCamelCase_ : List[Any] = """.self_attn"""
| 670 | 1 |
import string
import numpy
def A__ ( lowerCamelCase , lowerCamelCase ) -> int:
return b if a == 0 else greatest_common_divisor(b % a , lowerCamelCase )
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : List[str] = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
__UpperCamelCase : Any = numpy.vectorize(lambda _A : x % 36 )
__UpperCamelCase : Optional[Any] = numpy.vectorize(_A )
def __init__( self : str , snake_case_ : numpy.ndarray ):
UpperCamelCase_: str = self.modulus(snake_case_ ) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
UpperCamelCase_: Optional[Any] = encrypt_key.shape[0]
def lowerCAmelCase__ ( self : Any , snake_case_ : str ):
return self.key_string.index(snake_case_ )
def lowerCAmelCase__ ( self : Any , snake_case_ : int ):
return self.key_string[round(snake_case_ )]
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: List[str] = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
UpperCamelCase_: Optional[Any] = det % len(self.key_string )
UpperCamelCase_: str = len(self.key_string )
if greatest_common_divisor(snake_case_ , len(self.key_string ) ) != 1:
UpperCamelCase_: List[Any] = (
                f'''determinant modulo {req_l} of the encryption key ({det}) '''
                f'''is not coprime with {req_l}.\nTry another key.'''
)
raise ValueError(snake_case_ )
def lowerCAmelCase__ ( self : str , snake_case_ : str ):
UpperCamelCase_: Optional[int] = [char for char in text.upper() if char in self.key_string]
UpperCamelCase_: Tuple = chars[-1]
while len(snake_case_ ) % self.break_key != 0:
chars.append(snake_case_ )
return "".join(snake_case_ )
def lowerCAmelCase__ ( self : Dict , snake_case_ : str ):
UpperCamelCase_: str = self.process_text(text.upper() )
UpperCamelCase_: Union[str, Any] = """"""
for i in range(0 , len(snake_case_ ) - self.break_key + 1 , self.break_key ):
UpperCamelCase_: Tuple = text[i : i + self.break_key]
UpperCamelCase_: str = [self.replace_letters(snake_case_ ) for char in batch]
UpperCamelCase_: Any = numpy.array([vec] ).T
UpperCamelCase_: List[str] = self.modulus(self.encrypt_key.dot(snake_case_ ) ).T.tolist()[
0
]
UpperCamelCase_: List[str] = """""".join(
self.replace_digits(snake_case_ ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: str = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
UpperCamelCase_: str = det % len(self.key_string )
UpperCamelCase_: Any = None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
UpperCamelCase_: Union[str, Any] = i
break
UpperCamelCase_: List[Any] = (
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(snake_case_ ) )
def lowerCAmelCase__ ( self : Dict , snake_case_ : str ):
UpperCamelCase_: Union[str, Any] = self.make_decrypt_key()
UpperCamelCase_: List[Any] = self.process_text(text.upper() )
UpperCamelCase_: Union[str, Any] = """"""
for i in range(0 , len(snake_case_ ) - self.break_key + 1 , self.break_key ):
UpperCamelCase_: Dict = text[i : i + self.break_key]
UpperCamelCase_: List[str] = [self.replace_letters(snake_case_ ) for char in batch]
UpperCamelCase_: Optional[int] = numpy.array([vec] ).T
UpperCamelCase_: Optional[Any] = self.modulus(decrypt_key.dot(snake_case_ ) ).T.tolist()[0]
UpperCamelCase_: Union[str, Any] = """""".join(
self.replace_digits(snake_case_ ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
def A__ ( ) -> None:
UpperCamelCase_: Optional[Any] = int(input("""Enter the order of the encryption key: """ ) )
UpperCamelCase_: str = []
print("""Enter each row of the encryption key with space separated integers""" )
for _ in range(lowerCamelCase ):
UpperCamelCase_: Union[str, Any] = [int(lowerCamelCase ) for x in input().split()]
hill_matrix.append(lowerCamelCase )
UpperCamelCase_: List[Any] = HillCipher(numpy.array(lowerCamelCase ) )
print("""Would you like to encrypt or decrypt some text? (1 or 2)""" )
UpperCamelCase_: Dict = input("""\n1. Encrypt\n2. Decrypt\n""" )
if option == "1":
UpperCamelCase_: Optional[Any] = input("""What text would you like to encrypt?: """ )
print("""Your encrypted text is:""" )
print(hc.encrypt(lowerCamelCase ) )
elif option == "2":
UpperCamelCase_: Optional[int] = input("""What text would you like to decrypt?: """ )
print("""Your decrypted text is:""" )
print(hc.decrypt(lowerCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
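# A standalone round-trip sketch of the mod-36 Hill cipher arithmetic used
# above, working on numeric vectors directly (letters/digits map to 0..35, so
# "TE" -> [19, 4]). It mirrors `make_decrypt_key` without depending on the class:
def _hill_round_trip():
    key = numpy.array([[2, 5], [1, 6]])  # det = 7, coprime with 36
    det = round(numpy.linalg.det(key))
    det_inv = next(i for i in range(36) if (det * i) % 36 == 1)
    # det_inv * det * inv(key) is the integer adjugate scaled to a mod-36 inverse
    inv_key = numpy.rint(det_inv * det * numpy.linalg.inv(key)) % 36
    vec = numpy.array([[19], [4]])
    enc = key.dot(vec) % 36
    dec = numpy.rint(inv_key.dot(enc)) % 36
    assert (dec == vec).all()

_hill_round_trip()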
| 670 |
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: List[Any] = inspect.getfile(accelerate.test_utils )
UpperCamelCase_: List[str] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_script.py"""] )
UpperCamelCase_: str = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: Any = f'''
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
'''.split()
UpperCamelCase_: Dict = [sys.executable] + distributed_args
execute_subprocess_async(snake_case_ , env=os.environ.copy() )
| 670 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase_ : str = logging.get_logger(__name__)
lowerCamelCase_ : Optional[int] = {
"""facebook/deit-base-distilled-patch16-224""": (
"""https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"""
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : List[str] = """deit"""
def __init__( self : str , snake_case_ : Tuple=768 , snake_case_ : str=12 , snake_case_ : Optional[Any]=12 , snake_case_ : Optional[int]=3072 , snake_case_ : Optional[int]="gelu" , snake_case_ : Dict=0.0 , snake_case_ : Union[str, Any]=0.0 , snake_case_ : str=0.02 , snake_case_ : Optional[int]=1e-12 , snake_case_ : Optional[Any]=224 , snake_case_ : Any=16 , snake_case_ : str=3 , snake_case_ : Dict=True , snake_case_ : str=16 , **snake_case_ : List[Any] , ):
super().__init__(**snake_case_ )
UpperCamelCase_: List[Any] = hidden_size
UpperCamelCase_: List[str] = num_hidden_layers
UpperCamelCase_: List[Any] = num_attention_heads
UpperCamelCase_: Any = intermediate_size
UpperCamelCase_: List[str] = hidden_act
UpperCamelCase_: Any = hidden_dropout_prob
UpperCamelCase_: Dict = attention_probs_dropout_prob
UpperCamelCase_: str = initializer_range
UpperCamelCase_: List[str] = layer_norm_eps
UpperCamelCase_: Any = image_size
UpperCamelCase_: Dict = patch_size
UpperCamelCase_: int = num_channels
UpperCamelCase_: List[str] = qkv_bias
UpperCamelCase_: Optional[int] = encoder_stride
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : Optional[Any] = version.parse("""1.11""" )
@property
def lowerCAmelCase__ ( self : List[str] ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase__ ( self : str ):
return 1e-4
| 670 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class _UpperCamelCase ( _A , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Optional[int] = BarthezTokenizer
__UpperCamelCase : str = BarthezTokenizerFast
__UpperCamelCase : str = True
__UpperCamelCase : List[Any] = True
def lowerCAmelCase__ ( self : Optional[int] ):
super().setUp()
UpperCamelCase_: Tuple = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=snake_case_ )
UpperCamelCase_: Dict = tokenizer
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: str = """<pad>"""
UpperCamelCase_: int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(snake_case_ ) , 10_1122 )
def lowerCAmelCase__ ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_1122 )
@require_torch
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: Dict = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
UpperCamelCase_: Union[str, Any] = [0, 57, 3018, 7_0307, 91, 2]
UpperCamelCase_: Union[str, Any] = self.tokenizer(
snake_case_ , max_length=len(snake_case_ ) , padding=snake_case_ , truncation=snake_case_ , return_tensors="""pt""" )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
UpperCamelCase_: Any = batch.input_ids.tolist()[0]
self.assertListEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : Any ):
if not self.test_rust_tokenizer:
return
UpperCamelCase_: Optional[Any] = self.get_tokenizer()
UpperCamelCase_: Union[str, Any] = self.get_rust_tokenizer()
UpperCamelCase_: str = """I was born in 92000, and this is falsé."""
UpperCamelCase_: str = tokenizer.tokenize(snake_case_ )
UpperCamelCase_: int = rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase_: int = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
UpperCamelCase_: int = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase_: List[str] = self.get_rust_tokenizer()
UpperCamelCase_: Tuple = tokenizer.encode(snake_case_ )
UpperCamelCase_: Tuple = rust_tokenizer.encode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
@slow
def lowerCAmelCase__ ( self : int ):
# fmt: off
UpperCamelCase_: Optional[Any] = {"""input_ids""": [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # moussaKam/mbarthez is a French model, so we also use French texts.
UpperCamelCase_: str = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=snake_case_ , )
| 670 | 1 |
lowerCamelCase_ : str = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)]
def A__ ( lowerCamelCase ) -> int:
UpperCamelCase_: List[Any] = 0
while number:
        # Slightly increases speed by processing five digits at a time via the lookup table.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_00_00]
number //= 10_00_00
return sum_of_digits_squared
# Every chain eventually enters one of two cycles: the cycle through 89 (of
# which 58 is a member) and the fixed point 1. Seeding the cache with 58 and 1
# up front minimises the number of iterations needed to classify every other
# number. The original dictionary was replaced by a flat array to speed up
# lookups.
lowerCamelCase_ : list[bool | None] = [None] * 10_00_00_00
lowerCamelCase_ : Dict = True
lowerCamelCase_ : Tuple = False
def A__ ( lowerCamelCase ) -> bool:
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
UpperCamelCase_: List[Any] = chain(next_number(lowerCamelCase ) )
UpperCamelCase_: str = number_chain
while number < 10_00_00_00:
UpperCamelCase_: Tuple = number_chain
number *= 10
return number_chain
def A__ ( lowerCamelCase = 10_00_00_00 ) -> int:
for i in range(1 , lowerCamelCase ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(lowerCamelCase )
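# Added illustration (self-contained sketch, not part of the original solution):
# `_sum_sq_digits` mirrors the masked next-number helper above.
def _sum_sq_digits(n: int) -> int:
    return sum(int(d) ** 2 for d in str(n))
assert _sum_sq_digits(44) == 32  # 4**2 + 4**2
# 44 -> 32 -> 13 -> 10 -> 1                                  (chain ends at 1)
# 85 -> 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89   (chain cycles at 89)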
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution() = }""")
| 670 |
def A__ ( lowerCamelCase , lowerCamelCase ) -> int:
while second != 0:
UpperCamelCase_: Optional[Any] = first & second
first ^= second
UpperCamelCase_: Any = c << 1
return first
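# Added readable mirror (illustrative, not in the original file): the same
# carry-propagation loop with descriptive names. For non-negative Python ints
# the loop terminates because each iteration shifts the carries one bit left,
# past the remaining sum bits. Trace for (5, 3): (6, 2) -> (4, 4) -> (0, 8)
# -> (8, 0), returning 8.
def _add_readable(first: int, second: int) -> int:
    while second != 0:
        carry = first & second  # bits where both operands are 1
        first ^= second         # per-bit sum without the carries
        second = carry << 1     # carries move to the next bit position
    return first
assert _add_readable(5, 3) == 8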
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase_ : List[Any] = int(input("""Enter the first number: """).strip())
lowerCamelCase_ : Tuple = int(input("""Enter the second number: """).strip())
print(F"""{add(first, second) = }""")
| 670 | 1 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
lowerCamelCase_ : Optional[Any] = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
lowerCamelCase_ : Tuple = typing.Union[np.floataa, int, float] # noqa: UP007
def A__ ( lowerCamelCase , lowerCamelCase ) -> VectorOut:
return np.sqrt(np.sum((np.asarray(lowerCamelCase ) - np.asarray(lowerCamelCase )) ** 2 ) )
def A__ ( lowerCamelCase , lowerCamelCase ) -> VectorOut:
return sum((va - va) ** 2 for va, va in zip(lowerCamelCase , lowerCamelCase ) ) ** (1 / 2)
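# Added self-contained check (illustrative, not part of the original file): both
# formulations reduce to the classic 3-4-5 right triangle.
assert float(np.sqrt(np.sum((np.asarray([0, 0]) - np.asarray([3, 4])) ** 2))) == 5.0
assert sum((va - vb) ** 2 for va, vb in zip([0, 0], [3, 4])) ** (1 / 2) == 5.0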
if __name__ == "__main__":
def A__ ( ) -> None:
from timeit import timeit
print("""Without Numpy""" )
print(
timeit(
"""euclidean_distance_no_np([1, 2, 3], [4, 5, 6])""" , number=1_00_00 , globals=globals() , ) )
print("""With Numpy""" )
print(
timeit(
"""euclidean_distance([1, 2, 3], [4, 5, 6])""" , number=1_00_00 , globals=globals() , ) )
benchmark()
| 670 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
lowerCamelCase_ : List[str] = logging.getLogger(__name__)
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
__UpperCamelCase : bool = field(
default=_A , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
__UpperCamelCase : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
__UpperCamelCase : bool = field(
default=_A , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : Optional[str] = field(default=_A , metadata={"""help""": """The input training data file (a text file)."""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
__UpperCamelCase : bool = field(
default=_A , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. If passed, sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__UpperCamelCase : bool = field(
default=_A , metadata={
"""help""": (
"""Whether to pad all samples to the maximum sentence length. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch. More """
"""efficient on GPU but very bad for TPU."""
)
} , )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def lowerCAmelCase__ ( self : Dict ):
if self.train_file is not None:
UpperCamelCase_: Union[str, Any] = self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
UpperCamelCase_: Dict = self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : PreTrainedTokenizerBase
__UpperCamelCase : Union[bool, str, PaddingStrategy] = True
__UpperCamelCase : Optional[int] = None
__UpperCamelCase : Optional[int] = None
def __call__( self : Optional[int] , snake_case_ : Dict ):
UpperCamelCase_: Dict = """label""" if """label""" in features[0].keys() else """labels"""
UpperCamelCase_: int = [feature.pop(snake_case_ ) for feature in features]
UpperCamelCase_: Optional[Any] = len(snake_case_ )
UpperCamelCase_: List[str] = len(features[0]["""input_ids"""] )
UpperCamelCase_: Tuple = [
[{k: v[i] for k, v in feature.items()} for i in range(snake_case_ )] for feature in features
]
UpperCamelCase_: Any = list(chain(*snake_case_ ) )
UpperCamelCase_: List[Any] = self.tokenizer.pad(
snake_case_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
# Un-flatten
UpperCamelCase_: Tuple = {k: v.view(snake_case_ , snake_case_ , -1 ) for k, v in batch.items()}
# Add back labels
UpperCamelCase_: Optional[int] = torch.tensor(snake_case_ , dtype=torch.intaa )
return batch
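        # Added shape walkthrough (illustrative comment, not in the original
        # file): with 2 examples of 4 choices each, 2 * 4 = 8 candidate
        # sequences are flattened, padded to a common length L by the tokenizer,
        # viewed back to (2, 4, L) for the model, and the labels stay a flat
        # (2,) integer tensor.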
def A__ ( ) -> Tuple:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase_: str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: List[str] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_swag""" , lowerCamelCase , lowerCamelCase )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase_: Dict = training_args.get_process_log_level()
logger.setLevel(lowerCamelCase )
datasets.utils.logging.set_verbosity(lowerCamelCase )
transformers.utils.logging.set_verbosity(lowerCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
UpperCamelCase_: List[str] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase_: List[str] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
UpperCamelCase_: List[str] = {}
if data_args.train_file is not None:
UpperCamelCase_: List[Any] = data_args.train_file
if data_args.validation_file is not None:
UpperCamelCase_: Optional[int] = data_args.validation_file
UpperCamelCase_: Any = data_args.train_file.split(""".""" )[-1]
UpperCamelCase_: Tuple = load_dataset(
lowerCamelCase , data_files=lowerCamelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
UpperCamelCase_: int = load_dataset(
"""swag""" , """regular""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase_: Optional[int] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase_: Union[str, Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase_: List[str] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # When using your own dataset or a dataset other than SWAG, you will probably need to change this.
UpperCamelCase_: Union[str, Any] = [F'''ending{i}''' for i in range(4 )]
UpperCamelCase_: str = """sent1"""
UpperCamelCase_: List[str] = """sent2"""
if data_args.max_seq_length is None:
UpperCamelCase_: int = tokenizer.model_max_length
if max_seq_length > 10_24:
logger.warning(
"""The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"""
""" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"""
""" override this default with `--block_size xxx`.""" )
UpperCamelCase_: Optional[Any] = 10_24
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
UpperCamelCase_: Union[str, Any] = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(lowerCamelCase ):
UpperCamelCase_: Optional[Any] = [[context] * 4 for context in examples[context_name]]
UpperCamelCase_: Dict = examples[question_header_name]
UpperCamelCase_: List[str] = [
[F'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(lowerCamelCase )
]
# Flatten out
UpperCamelCase_: str = list(chain(*lowerCamelCase ) )
UpperCamelCase_: Any = list(chain(*lowerCamelCase ) )
# Tokenize
UpperCamelCase_: Any = tokenizer(
lowerCamelCase , lowerCamelCase , truncation=lowerCamelCase , max_length=lowerCamelCase , padding="""max_length""" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(lowerCamelCase ) , 4 )] for k, v in tokenized_examples.items()}
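    # Added illustration (hypothetical example, not from the original file): for
    # one SWAG row the first list repeats the sent1 context four times, while
    # the second list concatenates the sent2 header with each of
    # ending0..ending3, so the tokenizer sees four (context, continuation)
    # candidates that are regrouped four-at-a-time on return.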
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
UpperCamelCase_: str = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
UpperCamelCase_: Union[str, Any] = min(len(lowerCamelCase ) , data_args.max_train_samples )
UpperCamelCase_: Optional[int] = train_dataset.select(range(lowerCamelCase ) )
with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
UpperCamelCase_: str = train_dataset.map(
lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
UpperCamelCase_: Dict = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
UpperCamelCase_: str = min(len(lowerCamelCase ) , data_args.max_eval_samples )
UpperCamelCase_: Tuple = eval_dataset.select(range(lowerCamelCase ) )
with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
UpperCamelCase_: str = eval_dataset.map(
lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
UpperCamelCase_: str = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=lowerCamelCase , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(lowerCamelCase ):
UpperCamelCase_, UpperCamelCase_: List[str] = eval_predictions
UpperCamelCase_: Optional[Any] = np.argmax(lowerCamelCase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
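    # Added note (illustrative, not in the original file): the predictions hold
    # logits of shape (num_examples, 4); the argmax over the choice axis is
    # compared against the label ids to produce a single accuracy scalar.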
# Initialize our Trainer
UpperCamelCase_: Union[str, Any] = Trainer(
model=lowerCamelCase , args=lowerCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowerCamelCase , data_collator=lowerCamelCase , compute_metrics=lowerCamelCase , )
# Training
if training_args.do_train:
UpperCamelCase_: List[Any] = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase_: int = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase_: str = last_checkpoint
UpperCamelCase_: Optional[Any] = trainer.train(resume_from_checkpoint=lowerCamelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
UpperCamelCase_: Tuple = train_result.metrics
UpperCamelCase_: Tuple = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase )
)
UpperCamelCase_: Optional[Any] = min(lowerCamelCase , len(lowerCamelCase ) )
trainer.log_metrics("""train""" , lowerCamelCase )
trainer.save_metrics("""train""" , lowerCamelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
UpperCamelCase_: Optional[Any] = trainer.evaluate()
UpperCamelCase_: Tuple = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase )
UpperCamelCase_: Optional[Any] = min(lowerCamelCase , len(lowerCamelCase ) )
trainer.log_metrics("""eval""" , lowerCamelCase )
trainer.save_metrics("""eval""" , lowerCamelCase )
UpperCamelCase_: Optional[int] = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """multiple-choice""",
"""dataset_tags""": """swag""",
"""dataset_args""": """regular""",
"""dataset""": """SWAG""",
"""language""": """en""",
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCamelCase )
else:
trainer.create_model_card(**lowerCamelCase )
def A__ ( lowerCamelCase ) -> Tuple:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 670 | 1 |
def A__ ( lowerCamelCase ) -> int:
if not isinstance(lowerCamelCase , lowerCamelCase ):
UpperCamelCase_: Optional[int] = F'''Input value of [number={number}] must be an integer'''
raise TypeError(lowerCamelCase )
if number < 1:
UpperCamelCase_: Optional[Any] = F'''Input value of [number={number}] must be > 0'''
raise ValueError(lowerCamelCase )
UpperCamelCase_: Optional[Any] = 1
for i in range(1 , lowerCamelCase ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
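# Added self-contained mirror (illustrative, not part of the original file): the
# update `current *= 4 * i - 2; current //= i + 1` is the Catalan recurrence
# C(i) = C(i - 1) * (4 * i - 2) / (i + 1), so the routine returns C(number - 1).
def _catalan(number: int) -> int:
    current = 1
    for i in range(1, number):
        current = current * (4 * i - 2) // (i + 1)
    return current
assert [_catalan(n) for n in range(1, 6)] == [1, 1, 2, 5, 14]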
if __name__ == "__main__":
import doctest
doctest.testmod()
| 670 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase_ : Union[str, Any] = logging.getLogger()
lowerCamelCase_ : List[str] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _UpperCamelCase ( _A ):
'''simple docstring'''
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : Dict ):
os.makedirs(snake_case_ , exist_ok=snake_case_ )
UpperCamelCase_: int = {"""source""": """What is love ?""", """target""": """life"""}
UpperCamelCase_: Tuple = {"""train""": 12, """val""": 2, """test""": 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
UpperCamelCase_: Tuple = """\n""".join([contents[field]] * n_lines[split] )
with open(os.path.join(snake_case_ , f'''{split}.{field}''' ) , """w""" ) as f:
f.write(snake_case_ )
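        # The loops above write six aligned files (train/val/test x
        # source/target); e.g. train.source holds 12 copies of "What is love ?"
        # and train.target the matching 12 copies of "life".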
def lowerCAmelCase__ ( self : Dict , snake_case_ : int , snake_case_ : str = "pytorch" ):
UpperCamelCase_: Optional[Any] = self.get_auto_remove_tmp_dir()
UpperCamelCase_: Dict = os.path.join(snake_case_ , """output""" )
UpperCamelCase_: Any = os.path.join(snake_case_ , """data""" )
self._create_dummy_data(data_dir=snake_case_ )
UpperCamelCase_: Union[str, Any] = f'''
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
'''.split()
if gpus > 0:
testargs.append(f'''--gpus={gpus}''' )
if is_apex_available():
testargs.append("""--fp16""" )
else:
testargs.append("""--gpus=0""" )
testargs.append("""--distributed_backend=ddp_cpu""" )
testargs.append("""--num_processes=2""" )
UpperCamelCase_: Optional[Any] = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(snake_case_ , env=self.get_env() )
UpperCamelCase_: Optional[int] = os.path.join(snake_case_ , """metrics.json""" )
with open(snake_case_ ) as f:
UpperCamelCase_: Any = json.load(snake_case_ )
return result
@require_torch_gpu
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: List[str] = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: Any = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_gpu
@require_ray
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: List[Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
@require_ray
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: List[Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
| 670 | 1 |
from __future__ import annotations
from typing import Any
class _UpperCamelCase ( _A ):
'''simple docstring'''
pass
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : List[str] , snake_case_ : Any ):
UpperCamelCase_: Any = data
UpperCamelCase_: Node | None = None
def __iter__( self : str ):
UpperCamelCase_: Optional[Any] = self
UpperCamelCase_: List[Any] = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(snake_case_ )
yield node.data
UpperCamelCase_: List[Any] = node.next_node
@property
def lowerCAmelCase__ ( self : Dict ):
try:
list(self )
return False
except ContainsLoopError:
return True
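# Added alternative sketch (not part of the original class; `Node` is the class
# defined above under its masked name, whose instances expose `next_node` per
# the iterator): Floyd's tortoise-and-hare detects the same loops in O(1) extra
# space instead of the O(n) `visited` list.
def _has_loop_floyd(head: Node | None) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node            # one step
        fast = fast.next_node.next_node  # two steps
        if slow is fast:                 # the pointers meet only inside a cycle
            return True
    return False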
if __name__ == "__main__":
lowerCamelCase_ : str = Node(1)
lowerCamelCase_ : Tuple = Node(2)
lowerCamelCase_ : Tuple = Node(3)
lowerCamelCase_ : Any = Node(4)
print(root_node.has_loop) # False
lowerCamelCase_ : List[Any] = root_node.next_node
print(root_node.has_loop) # True
lowerCamelCase_ : Dict = Node(5)
lowerCamelCase_ : List[Any] = Node(6)
lowerCamelCase_ : Union[str, Any] = Node(5)
lowerCamelCase_ : Any = Node(6)
print(root_node.has_loop) # False
lowerCamelCase_ : Optional[int] = Node(1)
print(root_node.has_loop) # False
| 670 |
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : List[str] , snake_case_ : int , snake_case_ : Optional[Any]=None , snake_case_ : List[str]=None ):
UpperCamelCase_: List[Any] = data
UpperCamelCase_: List[Any] = previous
UpperCamelCase_: Tuple = next_node
def __str__( self : Dict ):
return f'''{self.data}'''
def lowerCAmelCase__ ( self : List[str] ):
return self.data
def lowerCAmelCase__ ( self : Any ):
return self.next
def lowerCAmelCase__ ( self : List[str] ):
return self.previous
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , snake_case_ : int ):
UpperCamelCase_: Union[str, Any] = head
def __iter__( self : Union[str, Any] ):
return self
def lowerCAmelCase__ ( self : Union[str, Any] ):
if not self.current:
raise StopIteration
else:
UpperCamelCase_: Dict = self.current.get_data()
UpperCamelCase_: Tuple = self.current.get_next()
return value
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : int ):
UpperCamelCase_: Optional[int] = None # First node in list
UpperCamelCase_: Dict = None # Last node in list
def __str__( self : Tuple ):
UpperCamelCase_: int = self.head
UpperCamelCase_: Tuple = []
while current is not None:
nodes.append(current.get_data() )
UpperCamelCase_: List[str] = current.get_next()
return " ".join(str(snake_case_ ) for node in nodes )
def __contains__( self : int , snake_case_ : int ):
UpperCamelCase_: Optional[Any] = self.head
while current:
if current.get_data() == value:
return True
UpperCamelCase_: Any = current.get_next()
return False
def __iter__( self : Any ):
return LinkedListIterator(self.head )
def lowerCAmelCase__ ( self : Tuple ):
if self.head:
return self.head.get_data()
return None
def lowerCAmelCase__ ( self : Optional[Any] ):
if self.tail:
return self.tail.get_data()
return None
def lowerCAmelCase__ ( self : Optional[int] , snake_case_ : Node ):
if self.head is None:
UpperCamelCase_: Tuple = node
UpperCamelCase_: Optional[int] = node
else:
self.insert_before_node(self.head , snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : Node ):
if self.head is None:
self.set_head(snake_case_ )
else:
self.insert_after_node(self.tail , snake_case_ )
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : int ):
UpperCamelCase_: Any = Node(snake_case_ )
if self.head is None:
self.set_head(snake_case_ )
else:
self.set_tail(snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : Node , snake_case_ : Node ):
UpperCamelCase_: str = node
UpperCamelCase_: int = node.previous
if node.get_previous() is None:
UpperCamelCase_: int = node_to_insert
else:
UpperCamelCase_: Dict = node_to_insert
UpperCamelCase_: int = node_to_insert
def lowerCAmelCase__ ( self : Dict , snake_case_ : Node , snake_case_ : Node ):
UpperCamelCase_: Tuple = node
UpperCamelCase_: Dict = node.next
if node.get_next() is None:
UpperCamelCase_: Union[str, Any] = node_to_insert
else:
UpperCamelCase_: str = node_to_insert
UpperCamelCase_: int = node_to_insert
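        # Added pointer diagram (illustrative comment, not in the original
        # file): inserting X after N in  ... <-> N <-> M <-> ...
        #   X.previous = N and X.next = M are wired first; then M.previous = X
        #   (when M exists) and finally N.next = X, giving
        #   ... <-> N <-> X <-> M <-> ...  with both directions consistent.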
def lowerCAmelCase__ ( self : Tuple , snake_case_ : int , snake_case_ : int ):
UpperCamelCase_: Union[str, Any] = 1
UpperCamelCase_: List[str] = Node(snake_case_ )
UpperCamelCase_: Optional[Any] = self.head
while node:
if current_position == position:
self.insert_before_node(snake_case_ , snake_case_ )
return
current_position += 1
UpperCamelCase_: Dict = node.next
self.insert_after_node(self.tail , snake_case_ )
def lowerCAmelCase__ ( self : int , snake_case_ : int ):
UpperCamelCase_: Union[str, Any] = self.head
while node:
if node.get_data() == item:
return node
UpperCamelCase_: List[Any] = node.get_next()
raise Exception("""Node not found""" )
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : List[str] ):
if (node := self.get_node(snake_case_ )) is not None:
if node == self.head:
UpperCamelCase_: Optional[int] = self.head.get_next()
if node == self.tail:
UpperCamelCase_: Union[str, Any] = self.tail.get_previous()
self.remove_node_pointers(snake_case_ )
@staticmethod
def lowerCAmelCase__ ( snake_case_ : Node ):
if node.get_next():
UpperCamelCase_: str = node.previous
if node.get_previous():
UpperCamelCase_: int = node.next
UpperCamelCase_: List[str] = None
UpperCamelCase_: int = None
def lowerCAmelCase__ ( self : str ):
return self.head is None
def A__ ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 670 | 1 |
def A__ ( lowerCamelCase ) -> bool:
UpperCamelCase_: Tuple = 0
for ch in input_str:
UpperCamelCase_: Optional[Any] = ord(lowerCamelCase )
UpperCamelCase_: Any = pow(2 , lowerCamelCase )
        # If the bit for this character's Unicode code point is already set, the character repeats.
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
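# Added readable mirror (illustrative, not part of the original file): one bit
# per Unicode code point; a repeated character finds its bit already set.
def _all_chars_unique(input_str: str) -> bool:
    bitmap = 0
    for ch in input_str:
        bit = 1 << ord(ch)
        if bitmap & bit:
            return False
        bitmap |= bit
    return True
assert _all_chars_unique("abc") is True
assert _all_chars_unique("aba") is False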
if __name__ == "__main__":
import doctest
doctest.testmod()
| 670 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase_ : List[str] = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[Any] = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 670 | 1 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self : int ):
torch.manual_seed(0 )
UpperCamelCase_: Any = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
@property
def lowerCAmelCase__ ( self : Union[str, Any] ):
torch.manual_seed(0 )
UpperCamelCase_: Union[str, Any] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , )
return model
@property
def lowerCAmelCase__ ( self : Any ):
torch.manual_seed(0 )
UpperCamelCase_: List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(snake_case_ )
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: Union[str, Any] = self.dummy_uncond_unet
UpperCamelCase_: Optional[Any] = DDIMScheduler()
UpperCamelCase_: List[str] = self.dummy_vq_model
UpperCamelCase_: List[Any] = LDMPipeline(unet=snake_case_ , vqvae=snake_case_ , scheduler=snake_case_ )
ldm.to(snake_case_ )
ldm.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: str = torch.manual_seed(0 )
UpperCamelCase_: int = ldm(generator=snake_case_ , num_inference_steps=2 , output_type="""numpy""" ).images
UpperCamelCase_: Dict = torch.manual_seed(0 )
UpperCamelCase_: str = ldm(generator=snake_case_ , num_inference_steps=2 , output_type="""numpy""" , return_dict=snake_case_ )[0]
UpperCamelCase_: List[Any] = image[0, -3:, -3:, -1]
UpperCamelCase_: Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase_: str = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
UpperCamelCase_: Optional[Any] = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
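        # The looser 3e-2 tolerance on "mps" absorbs Apple-silicon numerical
        # drift; CUDA/CPU runs are held to 1e-2.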
@slow
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: Dict = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
ldm.to(snake_case_ )
ldm.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: List[str] = torch.manual_seed(0 )
UpperCamelCase_: Optional[int] = ldm(generator=snake_case_ , num_inference_steps=5 , output_type="""numpy""" ).images
UpperCamelCase_: List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCamelCase_: List[str] = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] )
UpperCamelCase_: Dict = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 670 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self : int ):
torch.manual_seed(0 )
UpperCamelCase_: Any = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
@property
def lowerCAmelCase__ ( self : Union[str, Any] ):
torch.manual_seed(0 )
UpperCamelCase_: Union[str, Any] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , )
return model
@property
def lowerCAmelCase__ ( self : Any ):
torch.manual_seed(0 )
UpperCamelCase_: List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(snake_case_ )
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: Union[str, Any] = self.dummy_uncond_unet
UpperCamelCase_: Optional[Any] = DDIMScheduler()
UpperCamelCase_: List[str] = self.dummy_vq_model
UpperCamelCase_: List[Any] = LDMPipeline(unet=snake_case_ , vqvae=snake_case_ , scheduler=snake_case_ )
ldm.to(snake_case_ )
ldm.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: str = torch.manual_seed(0 )
UpperCamelCase_: int = ldm(generator=snake_case_ , num_inference_steps=2 , output_type="""numpy""" ).images
UpperCamelCase_: Dict = torch.manual_seed(0 )
UpperCamelCase_: str = ldm(generator=snake_case_ , num_inference_steps=2 , output_type="""numpy""" , return_dict=snake_case_ )[0]
UpperCamelCase_: List[Any] = image[0, -3:, -3:, -1]
UpperCamelCase_: Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase_: str = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
UpperCamelCase_: Optional[Any] = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: Dict = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
ldm.to(snake_case_ )
ldm.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: List[str] = torch.manual_seed(0 )
UpperCamelCase_: Optional[int] = ldm(generator=snake_case_ , num_inference_steps=5 , output_type="""numpy""" ).images
UpperCamelCase_: List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCamelCase_: List[str] = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] )
UpperCamelCase_: Dict = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 670 | 1 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: Optional[int] = logging.get_logger()
# the current default level is logging.WARNING
UpperCamelCase_: Dict = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(snake_case_ )
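        # Quick reference (added comment, not in the original file): the
        # verbosity helpers exercised above map onto the standard logging
        # levels: error=40, warning=30, info=20, debug=10.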
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: Union[str, Any] = logging.get_verbosity()
UpperCamelCase_: int = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
UpperCamelCase_: Union[str, Any] = """Testing 1, 2, 3"""
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(snake_case_ ) as cl:
logger.warning(snake_case_ )
self.assertEqual(cl.out , msg + """\n""" )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(snake_case_ ) as cl:
logger.warning(snake_case_ )
self.assertEqual(cl.out , """""" )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(snake_case_ ) as cl:
logger.warning(snake_case_ )
self.assertEqual(cl.out , msg + """\n""" )
# restore to the original level
logging.set_verbosity(snake_case_ )
@mockenv(TRANSFORMERS_VERBOSITY="""error""" )
def lowerCAmelCase__ ( self : Optional[int] ):
        # Reset so the env var takes effect the next time a logger call is made.
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
UpperCamelCase_: List[str] = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
UpperCamelCase_: str = os.getenv("""TRANSFORMERS_VERBOSITY""" , snake_case_ )
UpperCamelCase_: Any = logging.log_levels[env_level_str]
UpperCamelCase_: Dict = logging.get_verbosity()
self.assertEqual(
snake_case_ , snake_case_ , f'''TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}''' , )
# restore to the original level
UpperCamelCase_: str = """"""
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="""super-error""" )
def lowerCAmelCase__ ( self : List[Any] ):
        # Reset so the env var takes effect the next time a logger call is made.
transformers.utils.logging._reset_library_root_logger()
UpperCamelCase_: str = logging.logging.getLogger()
with CaptureLogger(snake_case_ ) as cl:
# this action activates the env var
logging.get_logger("""transformers.models.bart.tokenization_bart""" )
self.assertIn("""Unknown option TRANSFORMERS_VERBOSITY=super-error""" , cl.out )
# no need to restore as nothing was changed
def lowerCAmelCase__ ( self : List[Any] ):
# testing `logger.warning_advice()`
transformers.utils.logging._reset_library_root_logger()
UpperCamelCase_: List[str] = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
UpperCamelCase_: Any = """Testing 1, 2, 3"""
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""1""" ):
# nothing should be logged as env var disables this method
with CaptureLogger(snake_case_ ) as cl:
logger.warning_advice(snake_case_ )
self.assertEqual(cl.out , """""" )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""""" ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(snake_case_ ) as cl:
logger.warning_advice(snake_case_ )
self.assertEqual(cl.out , msg + """\n""" )
def A__ ( ) -> Union[str, Any]:
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
| 670 |
def A__ ( lowerCamelCase = 50 ) -> int:
UpperCamelCase_: List[Any] = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
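# Added self-contained cross-check (illustrative, not part of the original
# file): ways[n][t] counts rows of length n holding at least one tile of length
# t + 2; summing over the first tile's start position adds the arrangements of
# the remaining suffix plus the arrangement with no further tile. For length 5
# the Project Euler 116 statement gives 7 + 3 + 2 = 12.
def _tile_ways(length: int) -> int:
    ways = [[0] * 3 for _ in range(length + 1)]
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways[row_length][tile_length - 2] += (
                    ways[row_length - tile_start - tile_length][tile_length - 2] + 1
                )
    return sum(ways[length])
assert _tile_ways(5) == 12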
if __name__ == "__main__":
print(F"""{solution() = }""")
| 670 | 1 |
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def A__ ( *lowerCamelCase ) -> List[Any]:
if not isinstance(lowerCamelCase , lowerCamelCase ):
UpperCamelCase_: Dict = list(lowerCamelCase )
for i in range(len(lowerCamelCase ) ):
UpperCamelCase_: Tuple = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def A__ ( lowerCamelCase ) -> bool:
UpperCamelCase_: List[Any] = [
"""CUDA out of memory.""", # CUDA OOM
"""cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.""", # CUDNN SNAFU
"""DefaultCPUAllocator: can't allocate memory""", # CPU OOM
]
if isinstance(lowerCamelCase , lowerCamelCase ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
def A__ ( lowerCamelCase = None , lowerCamelCase = 1_28 ) -> Any:
if function is None:
return functools.partial(lowerCamelCase , starting_batch_size=lowerCamelCase )
UpperCamelCase_: Dict = starting_batch_size
def decorator(*lowerCamelCase , **lowerCamelCase ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
UpperCamelCase_: int = list(inspect.signature(lowerCamelCase ).parameters.keys() )
# Guard against user error
if len(lowerCamelCase ) < (len(lowerCamelCase ) + 1):
UpperCamelCase_: Optional[Any] = """, """.join([F'''{arg}={value}''' for arg, value in zip(params[1:] , args[1:] )] )
raise TypeError(
F'''Batch size was passed into `{function.__name__}` as the first argument when called.'''
F'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' )
while True:
if batch_size == 0:
raise RuntimeError("""No executable batch size found, reached zero.""" )
try:
return function(lowerCamelCase , *lowerCamelCase , **lowerCamelCase )
except Exception as e:
if should_reduce_batch_size(lowerCamelCase ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator
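# Hypothetical usage sketch (added for illustration; `make_loader` and `step`
# are assumed helpers, and the decorator name follows accelerate's public API
# for the masked `A__` above). Each device OOM halves `batch_size` and reruns
# the wrapped function from scratch, raising once the size reaches zero:
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size):
#       loader = make_loader(batch_size)
#       for batch in loader:
#           step(batch)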
| 670 |
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Union[str, Any]:
# Initialise PyTorch model
UpperCamelCase_: List[Any] = TaConfig.from_json_file(lowerCamelCase )
print(F'''Building PyTorch model from configuration: {config}''' )
UpperCamelCase_: Any = TaForConditionalGeneration(lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_ta(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
lowerCamelCase_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCamelCase_ : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
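    # Example invocation (hypothetical script name and paths, added for
    # illustration):
    #   python convert_t5_original_tf_checkpoint_to_pytorch.py \
    #     --tf_checkpoint_path /path/to/model.ckpt \
    #     --config_file /path/to/config.json \
    #     --pytorch_dump_path /path/to/pytorch_model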
| 670 | 1 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase = "x" , lowerCamelCase = 10**-10 , lowerCamelCase = 1 , ) -> complex:
UpperCamelCase_: Optional[Any] = symbols(lowerCamelCase )
UpperCamelCase_: int = lambdify(lowerCamelCase , lowerCamelCase )
UpperCamelCase_: Optional[Any] = lambdify(lowerCamelCase , diff(lowerCamelCase , lowerCamelCase ) )
UpperCamelCase_: Tuple = starting_point
while True:
if diff_function(lowerCamelCase ) != 0:
UpperCamelCase_: List[Any] = prev_guess - multiplicity * func(lowerCamelCase ) / diff_function(
lowerCamelCase )
else:
raise ZeroDivisionError("""Could not find root""" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
UpperCamelCase_: Any = next_guess
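# Added worked step (illustrative, not in the original file): one Newton update
# is x_{n+1} = x_n - m * f(x_n) / f'(x_n); for f(x) = x**2 - 2 with x0 = 1 and
# multiplicity m = 1 the first step gives 1 - (-1) / 2 = 1.5, already close to
# sqrt(2) ~= 1.4142.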
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(F"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}""")
# Find value of e
print(
"""The root of log(y) - 1 = 0 is """,
F"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
"""The root of exp(x) - 1 = 0 is""",
F"""{newton_raphson('exp(x) - 1', 10, precision=0.005)}""",
)
# Find root of cos(x)
print(F"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 670 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ : str = {
"""configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
"""tokenization_roformer""": ["""RoFormerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Union[str, Any] = ["""RoFormerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Any = [
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Dict = [
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[Any] = [
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 670 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase_ : str = logging.get_logger(__name__)
def A__ ( lowerCamelCase , lowerCamelCase ) -> int:
UpperCamelCase_: str = b.T
UpperCamelCase_: int = np.sum(np.square(lowerCamelCase ) , axis=1 )
UpperCamelCase_: Optional[Any] = np.sum(np.square(lowerCamelCase ) , axis=0 )
UpperCamelCase_: str = np.matmul(lowerCamelCase , lowerCamelCase )
UpperCamelCase_: int = aa[:, None] - 2 * ab + ba[None, :]
return d
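# Added derivation note (illustrative, not in the original file): the function
# above expands ||a_i - b_j||^2 = ||a_i||^2 - 2 a_i . b_j + ||b_j||^2, so all
# pairwise pixel-to-cluster distances come from a single matmul; the
# color-quantize helper below then takes the argmin over clusters per pixel.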
def A__ ( lowerCamelCase , lowerCamelCase ) -> Any:
UpperCamelCase_: Tuple = x.reshape(-1 , 3 )
UpperCamelCase_: int = squared_euclidean_distance(lowerCamelCase , lowerCamelCase )
return np.argmin(lowerCamelCase , axis=1 )
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : Optional[int] = ["""pixel_values"""]
def __init__( self : Optional[Any] , snake_case_ : Optional[Union[List[List[int]], np.ndarray]] = None , snake_case_ : bool = True , snake_case_ : Dict[str, int] = None , snake_case_ : PILImageResampling = PILImageResampling.BILINEAR , snake_case_ : bool = True , snake_case_ : bool = True , **snake_case_ : int , ):
super().__init__(**snake_case_ )
UpperCamelCase_: List[Any] = size if size is not None else {"""height""": 256, """width""": 256}
UpperCamelCase_: str = get_size_dict(snake_case_ )
UpperCamelCase_: Optional[Any] = np.array(snake_case_ ) if clusters is not None else None
UpperCamelCase_: Dict = do_resize
UpperCamelCase_: Optional[Any] = size
UpperCamelCase_: Dict = resample
UpperCamelCase_: str = do_normalize
UpperCamelCase_: Optional[Any] = do_color_quantize
def lowerCAmelCase__ ( self : Dict , snake_case_ : np.ndarray , snake_case_ : Dict[str, int] , snake_case_ : PILImageResampling = PILImageResampling.BILINEAR , snake_case_ : Optional[Union[str, ChannelDimension]] = None , **snake_case_ : Optional[Any] , ):
UpperCamelCase_: Optional[Any] = get_size_dict(snake_case_ )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size dictionary must contain both height and width keys. Got {size.keys()}''' )
return resize(
snake_case_ , size=(size["""height"""], size["""width"""]) , resample=snake_case_ , data_format=snake_case_ , **snake_case_ )
def lowerCAmelCase__ ( self : Dict , snake_case_ : np.ndarray , snake_case_ : Optional[Union[str, ChannelDimension]] = None , ):
UpperCamelCase_: List[Any] = rescale(image=snake_case_ , scale=1 / 127.5 , data_format=snake_case_ )
UpperCamelCase_: Tuple = image - 1
return image
def lowerCAmelCase__ ( self : Any , snake_case_ : ImageInput , snake_case_ : bool = None , snake_case_ : Dict[str, int] = None , snake_case_ : PILImageResampling = None , snake_case_ : bool = None , snake_case_ : Optional[bool] = None , snake_case_ : Optional[Union[List[List[int]], np.ndarray]] = None , snake_case_ : Optional[Union[str, TensorType]] = None , snake_case_ : Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **snake_case_ : Tuple , ):
UpperCamelCase_: str = do_resize if do_resize is not None else self.do_resize
UpperCamelCase_: Optional[int] = size if size is not None else self.size
UpperCamelCase_: str = get_size_dict(snake_case_ )
UpperCamelCase_: str = resample if resample is not None else self.resample
UpperCamelCase_: int = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase_: Dict = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
UpperCamelCase_: Optional[Any] = clusters if clusters is not None else self.clusters
UpperCamelCase_: Dict = np.array(snake_case_ )
UpperCamelCase_: str = make_list_of_images(snake_case_ )
if not valid_images(snake_case_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_color_quantize and clusters is None:
raise ValueError("""Clusters must be specified if do_color_quantize is True.""" )
# All transformations expect numpy arrays.
UpperCamelCase_: Dict = [to_numpy_array(snake_case_ ) for image in images]
if do_resize:
UpperCamelCase_: int = [self.resize(image=snake_case_ , size=snake_case_ , resample=snake_case_ ) for image in images]
if do_normalize:
UpperCamelCase_: Dict = [self.normalize(image=snake_case_ ) for image in images]
if do_color_quantize:
UpperCamelCase_: Any = [to_channel_dimension_format(snake_case_ , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
UpperCamelCase_: Any = np.array(snake_case_ )
UpperCamelCase_: Tuple = color_quantize(snake_case_ , snake_case_ ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
UpperCamelCase_: Tuple = images.shape[0]
UpperCamelCase_: Optional[int] = images.reshape(snake_case_ , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
UpperCamelCase_: List[Any] = list(snake_case_ )
else:
UpperCamelCase_: Any = [to_channel_dimension_format(snake_case_ , snake_case_ ) for image in images]
UpperCamelCase_: Dict = {"""input_ids""": images}
return BatchFeature(data=snake_case_ , tensor_type=snake_case_ )
| 670 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase = "x" , lowerCamelCase = 10**-10 , lowerCamelCase = 1 , ) -> complex:
UpperCamelCase_: Optional[Any] = symbols(lowerCamelCase )
UpperCamelCase_: int = lambdify(lowerCamelCase , lowerCamelCase )
UpperCamelCase_: Optional[Any] = lambdify(lowerCamelCase , diff(lowerCamelCase , lowerCamelCase ) )
UpperCamelCase_: Tuple = starting_point
while True:
if diff_function(lowerCamelCase ) != 0:
UpperCamelCase_: List[Any] = prev_guess - multiplicity * func(lowerCamelCase ) / diff_function(
lowerCamelCase )
else:
raise ZeroDivisionError("""Could not find root""" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
UpperCamelCase_: Any = next_guess
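# The loop above implements the modified Newton-Raphson step
#     x_{n+1} = x_n - m * f(x_n) / f'(x_n)
# where m is the root multiplicity; with m = 1 this is the classic Newton
# iteration, and larger m restores quadratic convergence at repeated roots.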
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
    # Find the fourth root of 5
print(F"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}""")
# Find value of e
print(
"""The root of log(y) - 1 = 0 is """,
F"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
"""The root of exp(x) - 1 = 0 is""",
F"""{newton_raphson('exp(x) - 1', 10, precision=0.005)}""",
)
# Find root of cos(x)
print(F"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 670 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase_ : Optional[Any] = {"""configuration_timm_backbone""": ["""TimmBackboneConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : int = ["""TimmBackbone"""]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
lowerCamelCase_ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
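# Background note: the _LazyModule pattern above defers heavy imports, loading
# submodules from _import_structure only on first attribute access, while the
# TYPE_CHECKING branch keeps the real symbols visible to static analyzers.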
| 670 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ : Optional[Any] = {
"""configuration_distilbert""": [
"""DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""DistilBertConfig""",
"""DistilBertOnnxConfig""",
],
"""tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[Any] = ["""DistilBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[int] = [
"""DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DistilBertForMaskedLM""",
"""DistilBertForMultipleChoice""",
"""DistilBertForQuestionAnswering""",
"""DistilBertForSequenceClassification""",
"""DistilBertForTokenClassification""",
"""DistilBertModel""",
"""DistilBertPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[str] = [
"""TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDistilBertForMaskedLM""",
"""TFDistilBertForMultipleChoice""",
"""TFDistilBertForQuestionAnswering""",
"""TFDistilBertForSequenceClassification""",
"""TFDistilBertForTokenClassification""",
"""TFDistilBertMainLayer""",
"""TFDistilBertModel""",
"""TFDistilBertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Dict = [
"""FlaxDistilBertForMaskedLM""",
"""FlaxDistilBertForMultipleChoice""",
"""FlaxDistilBertForQuestionAnswering""",
"""FlaxDistilBertForSequenceClassification""",
"""FlaxDistilBertForTokenClassification""",
"""FlaxDistilBertModel""",
"""FlaxDistilBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 670 | 1 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
lowerCamelCase_ : Union[str, Any] = 2_00
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowerCamelCase_ : Union[str, Any] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowerCamelCase_ : List[Any] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))
def A__ ( lowerCamelCase , lowerCamelCase ) -> tuple[str, float]:
UpperCamelCase_: Optional[int] = len([g for position, g in enumerate(lowerCamelCase ) if g == main_target[position]] )
return (item, float(lowerCamelCase ))
def A__ ( lowerCamelCase , lowerCamelCase ) -> tuple[str, str]:
UpperCamelCase_: Optional[int] = random.randint(0 , len(lowerCamelCase ) - 1 )
UpperCamelCase_: Dict = parent_a[:random_slice] + parent_a[random_slice:]
UpperCamelCase_: Tuple = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
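# Single-point crossover (intended behavior, shown with hypothetical strings):
# with parents "ABCD" and "abcd" and a cut after index 2, the children are
# "ABcd" and "abCD" -- each child takes a prefix from one parent and the
# matching suffix from the other.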
def A__ ( lowerCamelCase , lowerCamelCase ) -> str:
UpperCamelCase_: Optional[int] = list(lowerCamelCase )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
UpperCamelCase_: Optional[Any] = random.choice(lowerCamelCase )
return "".join(lowerCamelCase )
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , ) -> list[str]:
UpperCamelCase_: Optional[Any] = []
# Generate more children proportionally to the fitness score.
UpperCamelCase_: List[str] = int(parent_a[1] * 1_00 ) + 1
UpperCamelCase_: List[str] = 10 if child_n >= 10 else child_n
for _ in range(lowerCamelCase ):
UpperCamelCase_: List[str] = population_score[random.randint(0 , lowerCamelCase )][0]
UpperCamelCase_, UpperCamelCase_: Tuple = crossover(parent_a[0] , lowerCamelCase )
# Append new string to the population list.
pop.append(mutate(lowerCamelCase , lowerCamelCase ) )
pop.append(mutate(lowerCamelCase , lowerCamelCase ) )
return pop
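# Fitness-proportional reproduction: a parent with normalized score 0.5
# requests int(0.5 * 100) + 1 = 51 children, capped at 10 per call, so
# better-scoring strings contribute more offspring to the next generation.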
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase = True ) -> tuple[int, int, str]:
# Verify if N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
UpperCamelCase_: Union[str, Any] = F'''{N_POPULATION} must be bigger than {N_SELECTED}'''
raise ValueError(lowerCamelCase )
# Verify that the target contains no genes besides the ones inside genes variable.
UpperCamelCase_: str = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
UpperCamelCase_: Union[str, Any] = F'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
raise ValueError(lowerCamelCase )
# Generate random starting population.
UpperCamelCase_: Union[str, Any] = []
for _ in range(lowerCamelCase ):
population.append("""""".join([random.choice(lowerCamelCase ) for i in range(len(lowerCamelCase ) )] ) )
# Just some logs to know what the algorithms is doing.
UpperCamelCase_, UpperCamelCase_: Union[str, Any] = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(lowerCamelCase )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
UpperCamelCase_: Dict = [evaluate(lowerCamelCase , lowerCamelCase ) for item in population]
# Check if there is a matching evolution.
        UpperCamelCase_: Any = sorted(lowerCamelCase , key=lambda x : x[1] , reverse=True )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'''\nGeneration: {generation}'''
F'''\nTotal Population:{total_population}'''
F'''\nBest score: {population_score[0][1]}'''
F'''\nBest string: {population_score[0][0]}''' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
UpperCamelCase_: List[Any] = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(lowerCamelCase )
# Normalize population score to be between 0 and 1.
UpperCamelCase_: Optional[Any] = [
(item, score / len(lowerCamelCase )) for item, score in population_score
]
# This is selection
        for i in range(N_SELECTED ):
            population.extend(select(population_score[int(i )] , lowerCamelCase , lowerCamelCase ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
if len(lowerCamelCase ) > N_POPULATION:
break
if __name__ == "__main__":
lowerCamelCase_ : Any = (
"""This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"""
)
lowerCamelCase_ : str = list(
""" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"""
"""nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"""
)
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Optional[Any] = basic(target_str, genes_list)
print(
F"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
| 670 |
from manim import *
class _UpperCamelCase ( _A ):
'''simple docstring'''
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: Dict = Rectangle(height=0.5 , width=0.5 )
UpperCamelCase_: Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCamelCase_: Tuple = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Tuple = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Union[str, Any] = VGroup(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Optional[Any] = Text("""CPU""" , font_size=24 )
UpperCamelCase_: int = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(snake_case_ )
UpperCamelCase_: Optional[int] = [mem.copy() for i in range(1 )]
UpperCamelCase_: Dict = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Optional[int] = Text("""GPU""" , font_size=24 )
UpperCamelCase_: Optional[int] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
gpu.align_to(snake_case_ , snake_case_ )
gpu.set_x(gpu.get_x() - 1 )
self.add(snake_case_ )
UpperCamelCase_: Dict = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Any = Text("""Model""" , font_size=24 )
UpperCamelCase_: Optional[Any] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
model.move_to([3, -1.0, 0] )
self.play(
Create(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) , )
UpperCamelCase_: List[Any] = MarkupText(
f'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' , font_size=24 , )
UpperCamelCase_: Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCamelCase_: Union[str, Any] = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case_ , run_time=2.5 ) , Write(snake_case_ ) , Write(snake_case_ ) )
self.add(snake_case_ )
UpperCamelCase_: Union[str, Any] = []
UpperCamelCase_: Union[str, Any] = []
UpperCamelCase_: Tuple = []
for i, rect in enumerate(snake_case_ ):
UpperCamelCase_: Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(snake_case_ , opacity=0.7 )
cpu_target.move_to(snake_case_ )
cpu_target.generate_target()
UpperCamelCase_: int = 0.46 / 4
UpperCamelCase_: Optional[int] = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=snake_case_ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=snake_case_ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=snake_case_ , buff=0.0 )
cpu_targs.append(snake_case_ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(snake_case_ ) )
second_animations.append(MoveToTarget(snake_case_ , run_time=1.5 ) )
self.play(*snake_case_ )
self.play(*snake_case_ )
self.wait()
| 670 | 1 |
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: List[Any] = inspect.getfile(accelerate.test_utils )
UpperCamelCase_: List[str] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_script.py"""] )
UpperCamelCase_: str = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: Any = f'''
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
'''.split()
UpperCamelCase_: Dict = [sys.executable] + distributed_args
execute_subprocess_async(snake_case_ , env=os.environ.copy() )
| 670 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Union[str, Any] = """laion/clap-htsat-unfused"""
UpperCamelCase_: List[str] = tempfile.mkdtemp()
def lowerCAmelCase__ ( self : Tuple , **snake_case_ : Optional[Any] ):
return RobertaTokenizer.from_pretrained(self.checkpoint , **snake_case_ )
def lowerCAmelCase__ ( self : str , **snake_case_ : Any ):
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **snake_case_ )
def lowerCAmelCase__ ( self : Tuple ):
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: Union[str, Any] = self.get_tokenizer()
UpperCamelCase_: int = self.get_feature_extractor()
UpperCamelCase_: List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_: Any = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: Any = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_: Optional[int] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
UpperCamelCase_: Dict = self.get_feature_extractor(do_normalize=snake_case_ , padding_value=1.0 )
UpperCamelCase_: List[str] = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=snake_case_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: int = self.get_feature_extractor()
UpperCamelCase_: Optional[Any] = self.get_tokenizer()
UpperCamelCase_: Dict = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
UpperCamelCase_: Optional[Any] = floats_list((3, 1000) )
UpperCamelCase_: List[str] = feature_extractor(snake_case_ , return_tensors="""np""" )
UpperCamelCase_: int = processor(audios=snake_case_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: List[Any] = self.get_feature_extractor()
UpperCamelCase_: List[str] = self.get_tokenizer()
UpperCamelCase_: List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
UpperCamelCase_: Dict = """This is a test string"""
UpperCamelCase_: Tuple = processor(text=snake_case_ )
UpperCamelCase_: Optional[int] = tokenizer(snake_case_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: List[str] = self.get_feature_extractor()
UpperCamelCase_: Any = self.get_tokenizer()
UpperCamelCase_: Optional[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
UpperCamelCase_: str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase_: Tuple = processor.batch_decode(snake_case_ )
UpperCamelCase_: str = tokenizer.batch_decode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Any = self.get_feature_extractor()
UpperCamelCase_: str = self.get_tokenizer()
UpperCamelCase_: Optional[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
| 670 | 1 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def A__ ( lowerCamelCase = 8 ) -> str:
UpperCamelCase_: int = ascii_letters + digits + punctuation
return "".join(secrets.choice(lowerCamelCase ) for _ in range(lowerCamelCase ) )
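# Usage sketch (hypothetical call name): password(12) returns a 12-character
# string drawn uniformly from letters, digits, and punctuation via the
# `secrets` module, which is preferred over `random` for security tokens.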
def A__ ( lowerCamelCase , lowerCamelCase ) -> str:
    # Full password generator: combine the required characters with randomly
    # chosen letters, digits, and punctuation, then shuffle the result.
i -= len(lowerCamelCase )
UpperCamelCase_: List[str] = i // 3
UpperCamelCase_: Optional[int] = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
UpperCamelCase_: Optional[int] = (
chars_incl
+ random(lowerCamelCase , quotient + remainder )
+ random(lowerCamelCase , lowerCamelCase )
+ random(lowerCamelCase , lowerCamelCase )
)
UpperCamelCase_: int = list(lowerCamelCase )
shuffle(lowerCamelCase )
return "".join(lowerCamelCase )
# random is a generalised function for letters, characters and numbers
def A__ ( lowerCamelCase , lowerCamelCase ) -> str:
return "".join(secrets.choice(lowerCamelCase ) for _ in range(lowerCamelCase ) )
def A__ ( lowerCamelCase , lowerCamelCase ) -> Tuple:
pass # Put your code here...
def A__ ( lowerCamelCase , lowerCamelCase ) -> Dict:
pass # Put your code here...
def A__ ( lowerCamelCase , lowerCamelCase ) -> Dict:
pass # Put your code here...
def A__ ( lowerCamelCase , lowerCamelCase = 8 ) -> bool:
if len(lowerCamelCase ) < min_length:
# Your Password must be at least 8 characters long
return False
UpperCamelCase_: Dict = any(char in ascii_uppercase for char in password )
UpperCamelCase_: str = any(char in ascii_lowercase for char in password )
UpperCamelCase_: Tuple = any(char in digits for char in password )
UpperCamelCase_: int = any(char in punctuation for char in password )
return upper and lower and num and spec_char
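# Example (hypothetical input): "Hwea7$2!" passes the check above -- it is at
# least 8 characters long and contains an uppercase letter, a lowercase
# letter, a digit, and a punctuation character.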
# Passwords should contain uppercase and lowercase letters,
# numbers, and special characters
def A__ ( ) -> str:
UpperCamelCase_: Union[str, Any] = int(input("""Please indicate the max length of your password: """ ).strip() )
UpperCamelCase_: List[Any] = input(
"""Please indicate the characters that must be in your password: """ ).strip()
print("""Password generated:""" , password_generator(lowerCamelCase ) )
print(
"""Alternative Password generated:""" , alternative_password_generator(lowerCamelCase , lowerCamelCase ) , )
    print("""[If you are thinking of using this password, you had better save it.]""" )
if __name__ == "__main__":
main()
| 670 |
import warnings
from ..trainer import Trainer
from ..utils import logging
lowerCamelCase_ : Dict = logging.get_logger(__name__)
class _UpperCamelCase ( _A ):
'''simple docstring'''
def __init__( self : List[str] , snake_case_ : Tuple=None , **snake_case_ : List[str] ):
warnings.warn(
"""`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
"""instead.""" , snake_case_ , )
super().__init__(args=snake_case_ , **snake_case_ )
| 670 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : Optional[int] = ["""image_processor""", """tokenizer"""]
__UpperCamelCase : Union[str, Any] = """CLIPImageProcessor"""
__UpperCamelCase : int = ("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")
def __init__( self : List[Any] , snake_case_ : Any=None , snake_case_ : List[str]=None , **snake_case_ : Union[str, Any] ):
UpperCamelCase_: Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , snake_case_ , )
UpperCamelCase_: Optional[int] = kwargs.pop("""feature_extractor""" )
UpperCamelCase_: str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(snake_case_ , snake_case_ )
def __call__( self : Tuple , snake_case_ : Union[str, Any]=None , snake_case_ : List[str]=None , snake_case_ : Any=None , **snake_case_ : List[str] ):
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
UpperCamelCase_: List[str] = self.tokenizer(snake_case_ , return_tensors=snake_case_ , **snake_case_ )
if images is not None:
UpperCamelCase_: str = self.image_processor(snake_case_ , return_tensors=snake_case_ , **snake_case_ )
if text is not None and images is not None:
UpperCamelCase_: Optional[int] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**snake_case_ ) , tensor_type=snake_case_ )
def lowerCAmelCase__ ( self : Tuple , *snake_case_ : Optional[Any] , **snake_case_ : Optional[Any] ):
return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ )
def lowerCAmelCase__ ( self : List[Any] , *snake_case_ : int , **snake_case_ : str ):
return self.tokenizer.decode(*snake_case_ , **snake_case_ )
@property
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: List[str] = self.tokenizer.model_input_names
UpperCamelCase_: List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 670 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
lowerCamelCase_ : Optional[int] = logging.get_logger("""transformers.models.speecht5""")
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[Any]:
hf_model.apply_weight_norm()
UpperCamelCase_: Union[str, Any] = checkpoint["""input_conv.weight_g"""]
UpperCamelCase_: Optional[int] = checkpoint["""input_conv.weight_v"""]
UpperCamelCase_: List[Any] = checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
UpperCamelCase_: List[str] = checkpoint[F'''upsamples.{i}.1.weight_g''']
UpperCamelCase_: Dict = checkpoint[F'''upsamples.{i}.1.weight_v''']
UpperCamelCase_: List[str] = checkpoint[F'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
UpperCamelCase_: Tuple = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g''']
UpperCamelCase_: Any = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v''']
UpperCamelCase_: Tuple = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias''']
UpperCamelCase_: Union[str, Any] = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g''']
UpperCamelCase_: Any = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v''']
UpperCamelCase_: int = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias''']
UpperCamelCase_: int = checkpoint["""output_conv.1.weight_g"""]
UpperCamelCase_: Tuple = checkpoint["""output_conv.1.weight_v"""]
UpperCamelCase_: List[str] = checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
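# Weight normalization background: each convolution weight is stored as a
# direction tensor `weight_v` and a magnitude `weight_g`, recombined as
#     w = g * v / ||v||
# `apply_weight_norm()` splits the parameters so the checkpoint tensors can be
# copied in, and `remove_weight_norm()` folds them back into plain weights.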
@torch.no_grad()
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , ) -> Optional[int]:
if config_path is not None:
UpperCamelCase_: Union[str, Any] = SpeechTaHifiGanConfig.from_pretrained(lowerCamelCase )
else:
UpperCamelCase_: str = SpeechTaHifiGanConfig()
UpperCamelCase_: Union[str, Any] = SpeechTaHifiGan(lowerCamelCase )
UpperCamelCase_: str = torch.load(lowerCamelCase )
load_weights(orig_checkpoint["""model"""]["""generator"""] , lowerCamelCase , lowerCamelCase )
UpperCamelCase_: Union[str, Any] = np.load(lowerCamelCase )
UpperCamelCase_: int = stats[0].reshape(-1 )
UpperCamelCase_: Union[str, Any] = stats[1].reshape(-1 )
UpperCamelCase_: Dict = torch.from_numpy(lowerCamelCase ).float()
UpperCamelCase_: Optional[Any] = torch.from_numpy(lowerCamelCase ).float()
model.save_pretrained(lowerCamelCase )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(lowerCamelCase )
if __name__ == "__main__":
lowerCamelCase_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
lowerCamelCase_ : Optional[int] = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 670 | 1 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase_ : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase_ : Optional[int] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
lowerCamelCase_ : Optional[int] = {
"""vocab_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
},
}
lowerCamelCase_ : List[str] = {
"""allenai/longformer-base-4096""": 40_96,
"""allenai/longformer-large-4096""": 40_96,
"""allenai/longformer-large-4096-finetuned-triviaqa""": 40_96,
"""allenai/longformer-base-4096-extra.pos.embd.only""": 40_96,
"""allenai/longformer-large-4096-extra.pos.embd.only""": 40_96,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def A__ ( ) -> int:
UpperCamelCase_: Any = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
UpperCamelCase_: List[str] = bs[:]
UpperCamelCase_: Dict = 0
for b in range(2**8 ):
if b not in bs:
bs.append(lowerCamelCase )
cs.append(2**8 + n )
n += 1
UpperCamelCase_: List[Any] = [chr(lowerCamelCase ) for n in cs]
return dict(zip(lowerCamelCase , lowerCamelCase ) )
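# The mapping above gives every byte value 0-255 a printable unicode character:
# visible ASCII and common Latin-1 symbols map to themselves, while control and
# whitespace bytes shift into the 256+ range (e.g. the space byte 32 maps to
# chr(288), "Ġ", the familiar prefix marker in byte-level BPE vocabularies).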
def A__ ( lowerCamelCase ) -> Union[str, Any]:
UpperCamelCase_: Tuple = set()
UpperCamelCase_: Optional[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCamelCase_: str = char
return pairs
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : int = VOCAB_FILES_NAMES
__UpperCamelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : Optional[Any] = ["""input_ids""", """attention_mask"""]
def __init__( self : Optional[int] , snake_case_ : Tuple , snake_case_ : Dict , snake_case_ : Optional[Any]="replace" , snake_case_ : Union[str, Any]="<s>" , snake_case_ : Tuple="</s>" , snake_case_ : int="</s>" , snake_case_ : List[Any]="<s>" , snake_case_ : Optional[Any]="<unk>" , snake_case_ : Optional[Any]="<pad>" , snake_case_ : List[Any]="<mask>" , snake_case_ : Optional[int]=False , **snake_case_ : Tuple , ):
UpperCamelCase_: str = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else bos_token
UpperCamelCase_: str = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else eos_token
UpperCamelCase_: Optional[Any] = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else sep_token
UpperCamelCase_: Optional[Any] = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else cls_token
UpperCamelCase_: List[Any] = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else unk_token
UpperCamelCase_: List[str] = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase_: Dict = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else mask_token
super().__init__(
errors=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , add_prefix_space=snake_case_ , **snake_case_ , )
with open(snake_case_ , encoding="""utf-8""" ) as vocab_handle:
UpperCamelCase_: Tuple = json.load(snake_case_ )
UpperCamelCase_: Any = {v: k for k, v in self.encoder.items()}
UpperCamelCase_: Optional[int] = errors # how to handle errors in decoding
UpperCamelCase_: Union[str, Any] = bytes_to_unicode()
UpperCamelCase_: List[str] = {v: k for k, v in self.byte_encoder.items()}
with open(snake_case_ , encoding="""utf-8""" ) as merges_handle:
UpperCamelCase_: Tuple = merges_handle.read().split("""\n""" )[1:-1]
UpperCamelCase_: Tuple = [tuple(merge.split() ) for merge in bpe_merges]
UpperCamelCase_: Optional[int] = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
UpperCamelCase_: Tuple = {}
UpperCamelCase_: str = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCamelCase_: Optional[int] = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
def lowerCAmelCase__ ( self : Tuple ):
return len(self.encoder )
def lowerCAmelCase__ ( self : Any ):
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : Union[str, Any] ):
if token in self.cache:
return self.cache[token]
UpperCamelCase_: Any = tuple(snake_case_ )
UpperCamelCase_: Union[str, Any] = get_pairs(snake_case_ )
if not pairs:
return token
while True:
UpperCamelCase_: Any = min(snake_case_ , key=lambda snake_case_ : self.bpe_ranks.get(snake_case_ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCamelCase_, UpperCamelCase_: Union[str, Any] = bigram
UpperCamelCase_: Optional[int] = []
UpperCamelCase_: List[str] = 0
while i < len(snake_case_ ):
try:
UpperCamelCase_: Tuple = word.index(snake_case_ , snake_case_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCamelCase_: List[str] = j
if word[i] == first and i < len(snake_case_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCamelCase_: Union[str, Any] = tuple(snake_case_ )
UpperCamelCase_: str = new_word
if len(snake_case_ ) == 1:
break
else:
UpperCamelCase_: Tuple = get_pairs(snake_case_ )
UpperCamelCase_: List[str] = """ """.join(snake_case_ )
UpperCamelCase_: Union[str, Any] = word
return word
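    # Greedy BPE sketch (hypothetical ranks): with merges {("h","e"): 0,
    # ("l","l"): 1}, the token "hello" becomes ("h","e","l","l","o") ->
    # ("he","l","l","o") -> ("he","ll","o"); the loop always applies the
    # lowest-ranked available pair first and stops when no known pair remains.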
def lowerCAmelCase__ ( self : Dict , snake_case_ : int ):
UpperCamelCase_: Optional[Any] = []
for token in re.findall(self.pat , snake_case_ ):
UpperCamelCase_: Any = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(snake_case_ ).split(""" """ ) )
return bpe_tokens
def lowerCAmelCase__ ( self : Optional[int] , snake_case_ : Any ):
return self.encoder.get(snake_case_ , self.encoder.get(self.unk_token ) )
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : Union[str, Any] ):
return self.decoder.get(snake_case_ )
def lowerCAmelCase__ ( self : str , snake_case_ : Optional[Any] ):
UpperCamelCase_: Any = """""".join(snake_case_ )
UpperCamelCase_: List[str] = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def lowerCAmelCase__ ( self : int , snake_case_ : str , snake_case_ : Optional[str] = None ):
if not os.path.isdir(snake_case_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCamelCase_: Dict = os.path.join(
snake_case_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCamelCase_: Optional[int] = os.path.join(
snake_case_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(snake_case_ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=snake_case_ , ensure_ascii=snake_case_ ) + """\n""" )
UpperCamelCase_: Tuple = 0
with open(snake_case_ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""" )
UpperCamelCase_: List[str] = token_index
writer.write(""" """.join(snake_case_ ) + """\n""" )
index += 1
return vocab_file, merge_file
def lowerCAmelCase__ ( self : Optional[int] , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase_: Union[str, Any] = [self.cls_token_id]
UpperCamelCase_: Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase__ ( self : Union[str, Any] , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None , snake_case_ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case_ )) + [1]
return [1] + ([0] * len(snake_case_ )) + [1, 1] + ([0] * len(snake_case_ )) + [1]
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
UpperCamelCase_: Optional[int] = [self.sep_token_id]
UpperCamelCase_: Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : Optional[Any] , snake_case_ : Optional[Any]=False , **snake_case_ : Union[str, Any] ):
UpperCamelCase_: str = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(snake_case_ ) > 0 and not text[0].isspace()):
UpperCamelCase_: str = """ """ + text
return (text, kwargs)
| 670 |
lowerCamelCase_ : Optional[Any] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
lowerCamelCase_ : Union[str, Any] = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCamelCase_ : Optional[Any] = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 670 | 1 |
def A__ ( lowerCamelCase ) -> list:
UpperCamelCase_: Dict = [0] * len(lowerCamelCase )
for i in range(1 , len(lowerCamelCase ) ):
# use last results for better performance - dynamic programming
UpperCamelCase_: Union[str, Any] = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
UpperCamelCase_: Union[str, Any] = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
UpperCamelCase_: str = j
return prefix_result
def A__ ( lowerCamelCase ) -> int:
return max(prefix_function(lowerCamelCase ) )
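# Worked example: prefix_function("aabaaab") returns [0, 1, 0, 1, 2, 2, 3];
# each entry is the length of the longest proper prefix of s[:i+1] that is
# also a suffix of it, the core table of the Knuth-Morris-Pratt matcher.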
if __name__ == "__main__":
import doctest
doctest.testmod()
| 670 |
import cva
import numpy as np
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Dict , snake_case_ : float , snake_case_ : int ):
if k in (0.04, 0.06):
UpperCamelCase_: Union[str, Any] = k
UpperCamelCase_: Union[str, Any] = window_size
else:
raise ValueError("""invalid k value""" )
def __str__( self : int ):
return str(self.k )
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : str ):
UpperCamelCase_: int = cva.imread(snake_case_ , 0 )
UpperCamelCase_, UpperCamelCase_: List[Any] = img.shape
UpperCamelCase_: list[list[int]] = []
UpperCamelCase_: int = img.copy()
UpperCamelCase_: Any = cva.cvtColor(snake_case_ , cva.COLOR_GRAY2RGB )
UpperCamelCase_, UpperCamelCase_: List[Any] = np.gradient(snake_case_ )
UpperCamelCase_: Optional[Any] = dx**2
UpperCamelCase_: Dict = dy**2
UpperCamelCase_: Optional[Any] = dx * dy
        UpperCamelCase_: str = self.k  # Harris sensitivity parameter configured at construction
UpperCamelCase_: int = self.window_size // 2
for y in range(snake_case_ , h - offset ):
for x in range(snake_case_ , w - offset ):
UpperCamelCase_: List[Any] = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCamelCase_: int = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCamelCase_: List[str] = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCamelCase_: List[str] = (wxx * wyy) - (wxy**2)
UpperCamelCase_: Optional[int] = wxx + wyy
UpperCamelCase_: Dict = det - k * (trace**2)
                # Corner response threshold; lower it to detect more corners
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
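# Corner response background: with M the 2x2 structure tensor summed over the
# window, R = det(M) - k * trace(M)^2 is large and positive only when both
# eigenvalues of M are large, i.e. intensity changes sharply in every direction.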
if __name__ == "__main__":
lowerCamelCase_ : Optional[Any] = HarrisCorner(0.04, 3)
lowerCamelCase_ , lowerCamelCase_ : Any = edge_detect.detect("""path_to_image""")
cva.imwrite("""detect.png""", color_img)
| 670 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCamelCase_ : Optional[int] = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Dict = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Union[str, Any] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Union[str, Any] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 670 |
import random
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase = False ) -> dict:
UpperCamelCase_: dict = {i: [] for i in range(lowerCamelCase )}
    # if probability is greater than or equal to 1, generate a complete graph
if probability >= 1:
return complete_graph(lowerCamelCase )
# if probability is lower or equal than 0, then return a graph without edges
if probability <= 0:
return graph
# for each couple of nodes, add an edge from u to v
# if the number randomly generated is greater than probability probability
for i in range(lowerCamelCase ):
for j in range(i + 1 , lowerCamelCase ):
if random.random() < probability:
graph[i].append(lowerCamelCase )
if not directed:
                # if the graph is undirected, also add the reverse edge from j to i
graph[j].append(lowerCamelCase )
return graph
def A__ ( lowerCamelCase ) -> dict:
return {
i: [j for j in range(lowerCamelCase ) if i != j] for i in range(lowerCamelCase )
}
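# This is the Erdos-Renyi G(n, p) model: each of the n * (n - 1) / 2 possible
# edges is included independently with probability p, so a call like
# random_graph(4, 0.5) (hypothetical name) keeps roughly half of the 6
# candidate edges.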
if __name__ == "__main__":
import doctest
doctest.testmod()
| 670 | 1 |
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Any , snake_case_ : Optional[Any] , snake_case_ : Any , snake_case_ : int ):
self.assertEqual(len(snake_case_ ) , len(snake_case_ ) )
for a, b in zip(snake_case_ , snake_case_ ):
self.assertAlmostEqual(snake_case_ , snake_case_ , delta=snake_case_ )
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Tuple = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(snake_case_ ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 )
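    # Background: a gradient accumulator sums gradients across several
    # forward/backward passes so one optimizer step can emulate a larger
    # batch; the test above checks the running sum [1-2-1, 2+1+2] == [-2.0, 5.0]
    # and that reset() zeroes both the step counter and the stored gradients.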
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: List[Any] = None
ops.enable_eager_execution_internal()
UpperCamelCase_: Union[str, Any] = tf.config.list_physical_devices("""CPU""" )
if len(snake_case_ ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
UpperCamelCase_: Optional[Any] = tf.config.list_logical_devices(device_type="""CPU""" )
UpperCamelCase_: Dict = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
UpperCamelCase_: Dict = GradientAccumulator()
UpperCamelCase_: List[Any] = tf.Variable([4.0, 3.0] )
UpperCamelCase_, UpperCamelCase_: Any = create_optimizer(5e-5 , 10 , 5 )
UpperCamelCase_: Any = tf.Variable([0.0, 0.0] , trainable=snake_case_ )
def accumulate_on_replica(snake_case_ : Optional[int] ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(snake_case_ : Optional[Any] , snake_case_ : Any ):
with strategy.scope():
UpperCamelCase_: Union[str, Any] = strategy.experimental_local_results(snake_case_ )
local_variables[0].assign(snake_case_ )
local_variables[1].assign(snake_case_ )
strategy.run(snake_case_ , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(snake_case_ )
def _check_local_values(snake_case_ : Union[str, Any] , snake_case_ : str ):
UpperCamelCase_: str = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , snake_case_ , tol=1e-2 )
self.assertListAlmostEqual(values[1].value() , snake_case_ , tol=1e-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
| 670 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: Optional[int] = logging.get_logger()
# the current default level is logging.WARNING
UpperCamelCase_: Dict = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(snake_case_ )
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: Union[str, Any] = logging.get_verbosity()
UpperCamelCase_: int = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
UpperCamelCase_: Union[str, Any] = """Testing 1, 2, 3"""
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(snake_case_ ) as cl:
logger.warning(snake_case_ )
self.assertEqual(cl.out , msg + """\n""" )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(snake_case_ ) as cl:
logger.warning(snake_case_ )
self.assertEqual(cl.out , """""" )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(snake_case_ ) as cl:
logger.warning(snake_case_ )
self.assertEqual(cl.out , msg + """\n""" )
# restore to the original level
logging.set_verbosity(snake_case_ )
@mockenv(TRANSFORMERS_VERBOSITY="""error""" )
def lowerCAmelCase__ ( self : Optional[int] ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
UpperCamelCase_: List[str] = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
        UpperCamelCase_: str = os.getenv("""TRANSFORMERS_VERBOSITY""" , None )
UpperCamelCase_: Any = logging.log_levels[env_level_str]
UpperCamelCase_: Dict = logging.get_verbosity()
self.assertEqual(
snake_case_ , snake_case_ , f'''TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}''' , )
# restore to the original level
UpperCamelCase_: str = """"""
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="""super-error""" )
def lowerCAmelCase__ ( self : List[Any] ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
UpperCamelCase_: str = logging.logging.getLogger()
with CaptureLogger(snake_case_ ) as cl:
# this action activates the env var
logging.get_logger("""transformers.models.bart.tokenization_bart""" )
self.assertIn("""Unknown option TRANSFORMERS_VERBOSITY=super-error""" , cl.out )
# no need to restore as nothing was changed
def lowerCAmelCase__ ( self : List[Any] ):
# testing `logger.warning_advice()`
transformers.utils.logging._reset_library_root_logger()
UpperCamelCase_: List[str] = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
UpperCamelCase_: Any = """Testing 1, 2, 3"""
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""1""" ):
# nothing should be logged as env var disables this method
with CaptureLogger(snake_case_ ) as cl:
logger.warning_advice(snake_case_ )
self.assertEqual(cl.out , """""" )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""""" ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(snake_case_ ) as cl:
logger.warning_advice(snake_case_ )
self.assertEqual(cl.out , msg + """\n""" )
def A__ ( ) -> Union[str, Any]:
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
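# A minimal usage sketch of the verbosity API exercised by the tests above;
# the logger name is illustrative and nothing here runs at import time.
def _verbosity_demo():
    logging.set_verbosity_info()
    demo_logger = logging.get_logger("""transformers""")
    demo_logger.info("""INFO messages are now visible""")
    logging.set_verbosity_warning()  # INFO is silenced again; WARNING remains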
| 670 | 1 |
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> int:
if index == number_of_items:
return 0
UpperCamelCase_: Union[str, Any] = 0
UpperCamelCase_: Optional[int] = 0
UpperCamelCase_: Union[str, Any] = knapsack(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , index + 1 )
if weights[index] <= max_weight:
UpperCamelCase_: int = values[index] + knapsack(
lowerCamelCase , lowerCamelCase , lowerCamelCase , max_weight - weights[index] , index + 1 )
return max(lowerCamelCase , lowerCamelCase )
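# A memoized sketch of the same 0/1-knapsack recurrence with illustrative
# names (knapsack_memo/best are mine, not part of the file above): at each
# index we either skip the item or, if it fits, take it and shrink capacity.
from functools import lru_cache

def knapsack_memo(weights, values, max_weight):
    @lru_cache(maxsize=None)
    def best(index, capacity):
        if index == len(weights):
            return 0
        skip = best(index + 1, capacity)
        if weights[index] <= capacity:
            return max(skip, values[index] + best(index + 1, capacity - weights[index]))
        return skip
    return best(0, max_weight)
# Example: knapsack_memo([1, 3, 4, 5], [1, 4, 5, 7], 7) returns 9 (items of weight 3 and 4).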
if __name__ == "__main__":
import doctest
doctest.testmod()
| 670 |
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
lowerCamelCase_ : Optional[int] = HUGGINGFACE_HUB_CACHE
lowerCamelCase_ : List[str] = """config.json"""
lowerCamelCase_ : Any = """diffusion_pytorch_model.bin"""
lowerCamelCase_ : Union[str, Any] = """diffusion_flax_model.msgpack"""
lowerCamelCase_ : Dict = """model.onnx"""
lowerCamelCase_ : List[Any] = """diffusion_pytorch_model.safetensors"""
lowerCamelCase_ : Optional[Any] = """weights.pb"""
lowerCamelCase_ : Optional[Any] = """https://huggingface.co"""
lowerCamelCase_ : Union[str, Any] = default_cache_path
lowerCamelCase_ : Tuple = """diffusers_modules"""
lowerCamelCase_ : Optional[Any] = os.getenv("""HF_MODULES_CACHE""", os.path.join(hf_cache_home, """modules"""))
lowerCamelCase_ : str = ["""fp16""", """non-ema"""]
lowerCamelCase_ : List[Any] = """.self_attn"""
| 670 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ : Tuple = logging.get_logger(__name__)
lowerCamelCase_ : str = {
"""vinvino02/glpn-kitti""": """https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json""",
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : Any = """glpn"""
def __init__( self : Tuple , snake_case_ : int=3 , snake_case_ : Dict=4 , snake_case_ : List[Any]=[2, 2, 2, 2] , snake_case_ : str=[8, 4, 2, 1] , snake_case_ : Dict=[32, 64, 160, 256] , snake_case_ : Tuple=[7, 3, 3, 3] , snake_case_ : Optional[Any]=[4, 2, 2, 2] , snake_case_ : Tuple=[1, 2, 5, 8] , snake_case_ : int=[4, 4, 4, 4] , snake_case_ : Optional[Any]="gelu" , snake_case_ : List[str]=0.0 , snake_case_ : Tuple=0.0 , snake_case_ : int=0.02 , snake_case_ : Tuple=0.1 , snake_case_ : Tuple=1e-6 , snake_case_ : Any=64 , snake_case_ : Dict=10 , snake_case_ : Optional[Any]=-1 , **snake_case_ : List[Any] , ):
super().__init__(**snake_case_ )
UpperCamelCase_: Union[str, Any] = num_channels
UpperCamelCase_: Tuple = num_encoder_blocks
UpperCamelCase_: str = depths
UpperCamelCase_: Any = sr_ratios
UpperCamelCase_: Tuple = hidden_sizes
UpperCamelCase_: Optional[int] = patch_sizes
UpperCamelCase_: Optional[int] = strides
UpperCamelCase_: Any = mlp_ratios
UpperCamelCase_: Tuple = num_attention_heads
UpperCamelCase_: Union[str, Any] = hidden_act
UpperCamelCase_: int = hidden_dropout_prob
UpperCamelCase_: List[str] = attention_probs_dropout_prob
UpperCamelCase_: Union[str, Any] = initializer_range
UpperCamelCase_: Union[str, Any] = drop_path_rate
UpperCamelCase_: List[str] = layer_norm_eps
UpperCamelCase_: Union[str, Any] = decoder_hidden_size
UpperCamelCase_: Optional[Any] = max_depth
UpperCamelCase_: Dict = head_in_index
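# Usage sketch (mirrors the upstream GLPNConfig this class corresponds to;
# parameter names follow the __init__ signature above):
#   config = GLPNConfig(num_encoder_blocks=4, hidden_sizes=[32, 64, 160, 256])
#   config.save_pretrained("./glpn-config")  # serializes to config.json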
| 670 |
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: List[Any] = inspect.getfile(accelerate.test_utils )
UpperCamelCase_: List[str] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_script.py"""] )
UpperCamelCase_: str = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: Any = f'''
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
'''.split()
UpperCamelCase_: Dict = [sys.executable] + distributed_args
execute_subprocess_async(snake_case_ , env=os.environ.copy() )
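        # the command assembled above is roughly equivalent to running
        #   python <accelerate test dir>/xla_spawn.py --num_cores 8 test_script.py
        # (paths are resolved from the installed `accelerate` package at runtime)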
| 670 | 1 |
import math
import unittest
def A__ ( lowerCamelCase ) -> bool:
assert isinstance(lowerCamelCase , lowerCamelCase ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
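    # (any n = 6k + r with r in {0, 2, 3, 4} is divisible by 2 or 3, so any
    # remaining prime factor must have the form 6k - 1 or 6k + 1)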
for i in range(5 , int(math.sqrt(lowerCamelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : int ):
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def lowerCAmelCase__ ( self : Tuple ):
with self.assertRaises(snake_case_ ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , )
self.assertFalse(
is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 670 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class _UpperCamelCase ( _A , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Optional[int] = BarthezTokenizer
__UpperCamelCase : str = BarthezTokenizerFast
__UpperCamelCase : str = True
__UpperCamelCase : List[Any] = True
def lowerCAmelCase__ ( self : Optional[int] ):
super().setUp()
UpperCamelCase_: Tuple = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=snake_case_ )
UpperCamelCase_: Dict = tokenizer
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: str = """<pad>"""
UpperCamelCase_: int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(snake_case_ ) , 10_1122 )
def lowerCAmelCase__ ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_1122 )
@require_torch
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: Dict = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
UpperCamelCase_: Union[str, Any] = [0, 57, 3018, 7_0307, 91, 2]
UpperCamelCase_: Union[str, Any] = self.tokenizer(
snake_case_ , max_length=len(snake_case_ ) , padding=snake_case_ , truncation=snake_case_ , return_tensors="""pt""" )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
UpperCamelCase_: Any = batch.input_ids.tolist()[0]
self.assertListEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : Any ):
if not self.test_rust_tokenizer:
return
UpperCamelCase_: Optional[Any] = self.get_tokenizer()
UpperCamelCase_: Union[str, Any] = self.get_rust_tokenizer()
UpperCamelCase_: str = """I was born in 92000, and this is falsé."""
UpperCamelCase_: str = tokenizer.tokenize(snake_case_ )
UpperCamelCase_: int = rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase_: int = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
UpperCamelCase_: int = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase_: List[str] = self.get_rust_tokenizer()
UpperCamelCase_: Tuple = tokenizer.encode(snake_case_ )
UpperCamelCase_: Tuple = rust_tokenizer.encode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
@slow
def lowerCAmelCase__ ( self : int ):
# fmt: off
UpperCamelCase_: Optional[Any] = {"""input_ids""": [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # moussaKam/mbarthez is a French model, so we also use French texts.
UpperCamelCase_: str = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=snake_case_ , )
| 670 | 1 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ : List[Any] = logging.get_logger(__name__)
def A__ ( lowerCamelCase ) -> Any:
UpperCamelCase_: List[str] = SwinConfig.from_pretrained(
"""microsoft/swin-tiny-patch4-window7-224""" , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
UpperCamelCase_: Optional[Any] = MaskFormerConfig(backbone_config=lowerCamelCase )
UpperCamelCase_: str = """huggingface/label-files"""
if "ade20k-full" in model_name:
# this should be ok
UpperCamelCase_: Dict = 8_47
UpperCamelCase_: Optional[int] = """maskformer-ade20k-full-id2label.json"""
elif "ade" in model_name:
# this should be ok
UpperCamelCase_: Optional[int] = 1_50
UpperCamelCase_: int = """ade20k-id2label.json"""
elif "coco-stuff" in model_name:
# this should be ok
UpperCamelCase_: Tuple = 1_71
UpperCamelCase_: Optional[int] = """maskformer-coco-stuff-id2label.json"""
elif "coco" in model_name:
# TODO
UpperCamelCase_: Any = 1_33
UpperCamelCase_: Tuple = """coco-panoptic-id2label.json"""
elif "cityscapes" in model_name:
# this should be ok
UpperCamelCase_: List[Any] = 19
UpperCamelCase_: int = """cityscapes-id2label.json"""
elif "vistas" in model_name:
# this should be ok
UpperCamelCase_: Any = 65
UpperCamelCase_: List[str] = """mapillary-vistas-id2label.json"""
UpperCamelCase_: Union[str, Any] = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="""dataset""" ) , """r""" ) )
UpperCamelCase_: List[Any] = {int(lowerCamelCase ): v for k, v in idalabel.items()}
return config
def A__ ( lowerCamelCase ) -> Optional[Any]:
UpperCamelCase_: Tuple = []
# stem
# fmt: off
rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((F'''backbone.layers.{i}.downsample.reduction.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((F'''backbone.layers.{i}.downsample.norm.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((F'''backbone.layers.{i}.downsample.norm.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') )
# FPN
rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') )
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') )
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') )
rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") )
rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') )
# cross-attention out projection
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') )
# MLP 1
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') )
# MLP 2
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') )
# layernorm 3 (final layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") )
# heads on top
rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") )
for i in range(3 ):
rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', F'''mask_embedder.{i}.0.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', F'''mask_embedder.{i}.0.bias''') )
# fmt: on
return rename_keys
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> int:
UpperCamelCase_: Union[str, Any] = dct.pop(lowerCamelCase )
UpperCamelCase_: Optional[Any] = val
def A__ ( lowerCamelCase , lowerCamelCase ) -> Any:
UpperCamelCase_: List[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
UpperCamelCase_: Optional[Any] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
UpperCamelCase_: Optional[Any] = state_dict.pop(F'''backbone.layers.{i}.blocks.{j}.attn.qkv.weight''' )
UpperCamelCase_: Optional[int] = state_dict.pop(F'''backbone.layers.{i}.blocks.{j}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
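            # in_proj_weight has shape (3 * dim, dim): rows [0:dim] hold the
            # query, [dim:2*dim] the key and [2*dim:3*dim] the value weights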
UpperCamelCase_: int = in_proj_weight[:dim, :]
UpperCamelCase_: Any = in_proj_bias[: dim]
UpperCamelCase_: Any = in_proj_weight[
dim : dim * 2, :
]
UpperCamelCase_: int = in_proj_bias[
dim : dim * 2
]
UpperCamelCase_: Dict = in_proj_weight[
-dim :, :
]
UpperCamelCase_: Tuple = in_proj_bias[-dim :]
# fmt: on
def A__ ( lowerCamelCase , lowerCamelCase ) -> Any:
# fmt: off
UpperCamelCase_: Optional[int] = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
UpperCamelCase_: int = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight''' )
UpperCamelCase_: str = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase_: Optional[int] = in_proj_weight[: hidden_size, :]
        UpperCamelCase_: Tuple = in_proj_bias[:hidden_size]
UpperCamelCase_: Optional[Any] = in_proj_weight[hidden_size : hidden_size * 2, :]
UpperCamelCase_: str = in_proj_bias[hidden_size : hidden_size * 2]
UpperCamelCase_: Dict = in_proj_weight[-hidden_size :, :]
UpperCamelCase_: Dict = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
UpperCamelCase_: str = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight''' )
UpperCamelCase_: int = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase_: str = in_proj_weight[: hidden_size, :]
        UpperCamelCase_: Optional[Any] = in_proj_bias[:hidden_size]
UpperCamelCase_: Union[str, Any] = in_proj_weight[hidden_size : hidden_size * 2, :]
UpperCamelCase_: List[str] = in_proj_bias[hidden_size : hidden_size * 2]
UpperCamelCase_: Tuple = in_proj_weight[-hidden_size :, :]
UpperCamelCase_: Tuple = in_proj_bias[-hidden_size :]
# fmt: on
def A__ ( ) -> torch.Tensor:
UpperCamelCase_: Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
UpperCamelCase_: Dict = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw )
return im
@torch.no_grad()
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = False ) -> Tuple:
UpperCamelCase_: List[Any] = get_maskformer_config(lowerCamelCase )
# load original state_dict
with open(lowerCamelCase , """rb""" ) as f:
UpperCamelCase_: Dict = pickle.load(lowerCamelCase )
UpperCamelCase_: List[Any] = data["""model"""]
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
UpperCamelCase_: int = create_rename_keys(lowerCamelCase )
for src, dest in rename_keys:
rename_key(lowerCamelCase , lowerCamelCase , lowerCamelCase )
read_in_swin_q_k_v(lowerCamelCase , config.backbone_config )
read_in_decoder_q_k_v(lowerCamelCase , lowerCamelCase )
# update to torch tensors
for key, value in state_dict.items():
UpperCamelCase_: Dict = torch.from_numpy(lowerCamelCase )
# load 🤗 model
UpperCamelCase_: List[str] = MaskFormerForInstanceSegmentation(lowerCamelCase )
model.eval()
for name, param in model.named_parameters():
print(lowerCamelCase , param.shape )
UpperCamelCase_, UpperCamelCase_: Optional[Any] = model.load_state_dict(lowerCamelCase , strict=lowerCamelCase )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(lowerCamelCase ) == 0, F'''Unexpected keys: {unexpected_keys}'''
# verify results
UpperCamelCase_: Union[str, Any] = prepare_img()
if "vistas" in model_name:
UpperCamelCase_: List[Any] = 65
elif "cityscapes" in model_name:
UpperCamelCase_: Optional[Any] = 6_55_35
else:
UpperCamelCase_: Optional[int] = 2_55
UpperCamelCase_: Optional[Any] = True if """ade""" in model_name else False
UpperCamelCase_: List[str] = MaskFormerImageProcessor(ignore_index=lowerCamelCase , reduce_labels=lowerCamelCase )
UpperCamelCase_: Dict = image_processor(lowerCamelCase , return_tensors="""pt""" )
UpperCamelCase_: Any = model(**lowerCamelCase )
print("""Logits:""" , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
UpperCamelCase_: Optional[int] = torch.tensor(
[[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase , atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and image processor to {pytorch_dump_folder_path}''' )
Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase )
model.save_pretrained(lowerCamelCase )
image_processor.save_pretrained(lowerCamelCase )
if push_to_hub:
print("""Pushing model and image processor to the hub...""" )
model.push_to_hub(F'''nielsr/{model_name}''' )
image_processor.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
lowerCamelCase_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""maskformer-swin-tiny-ade""",
type=str,
help=("""Name of the MaskFormer model you'd like to convert""",),
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCamelCase_ : List[Any] = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
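# Example invocation of this script (file name and paths are illustrative):
#   python convert_maskformer_checkpoint.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl \
#       --pytorch_dump_folder_path ./maskformer-swin-tiny-ade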
| 670 |
def A__ ( first: int , second: int ) -> int:
    while second != 0:
        # carry holds the common set bits; XOR adds the bits without carrying
        carry = first & second
        first ^= second
        second = carry << 1
    return first
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    first = int(input("""Enter the first number: """).strip())
    second = int(input("""Enter the second number: """).strip())
    print(F"""{A__(first, second) = }""")
| 670 | 1 |
import torch
from transformers import AutoModel
class _UpperCamelCase ( torch.nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , snake_case_ : int="sayef/fsner-bert-base-uncased" ):
super(snake_case_ , self ).__init__()
UpperCamelCase_: Optional[int] = AutoModel.from_pretrained(snake_case_ , return_dict=snake_case_ )
UpperCamelCase_: Optional[int] = torch.nn.CosineSimilarity(3 , 1e-08 )
UpperCamelCase_: int = torch.nn.Softmax(dim=1 )
def lowerCAmelCase__ ( self : Union[str, Any] , **snake_case_ : List[Any] ):
return self.bert(**snake_case_ ).last_hidden_state
def lowerCAmelCase__ ( self : Tuple , snake_case_ : List[str] ):
return token_embeddings.sum(2 , keepdim=snake_case_ )
def lowerCAmelCase__ ( self : Optional[int] , snake_case_ : Tuple , snake_case_ : Tuple , snake_case_ : Optional[Any]=1 ):
return self.softmax(T * self.cos(snake_case_ , snake_case_ ) )
def lowerCAmelCase__ ( self : str , snake_case_ : Optional[Any] , snake_case_ : Optional[int] ):
UpperCamelCase_: Any = W_supports["""sizes"""].tolist()
UpperCamelCase_: Optional[Any] = W_supports["""start_token_id"""].item()
UpperCamelCase_: Optional[int] = W_supports["""end_token_id"""].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
UpperCamelCase_: Optional[Any] = self.BERT(**snake_case_ )
UpperCamelCase_: Any = self.BERT(**snake_case_ )
UpperCamelCase_: List[Any] = None
UpperCamelCase_: List[Any] = None
UpperCamelCase_: List[Any] = W_supports["""input_ids"""] == start_token_id
UpperCamelCase_: List[Any] = W_supports["""input_ids"""] == end_token_id
for i, size in enumerate(snake_case_ ):
if i == 0:
UpperCamelCase_: Dict = 0
else:
UpperCamelCase_: Tuple = support_sizes[i - 1]
UpperCamelCase_: Optional[Any] = S[s : s + size][start_token_masks[s : s + size]]
UpperCamelCase_: Dict = S[s : s + size][end_token_masks[s : s + size]]
UpperCamelCase_: Optional[int] = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
UpperCamelCase_: Optional[int] = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
UpperCamelCase_: List[str] = torch.vstack((p_starts, p_start) )
UpperCamelCase_: Any = torch.vstack((p_ends, p_end) )
else:
UpperCamelCase_: Optional[Any] = p_start
UpperCamelCase_: Any = p_end
return p_starts, p_ends
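# Usage sketch inferred from the forward pass above (not an official API):
# `W_supports` must include "sizes", "start_token_id" and "end_token_id"
# alongside the usual tokenizer outputs; the method pops those three keys,
# encodes queries and supports with BERT, and returns per-token start/end
# probabilities for each query sentence.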
| 670 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
lowerCamelCase_ : List[str] = logging.getLogger(__name__)
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
__UpperCamelCase : bool = field(
default=_A , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
__UpperCamelCase : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
__UpperCamelCase : bool = field(
default=_A , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : Optional[str] = field(default=_A , metadata={"""help""": """The input training data file (a text file)."""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
__UpperCamelCase : bool = field(
default=_A , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. If passed, sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__UpperCamelCase : bool = field(
default=_A , metadata={
"""help""": (
"""Whether to pad all samples to the maximum sentence length. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch. More """
"""efficient on GPU but very bad for TPU."""
)
} , )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def lowerCAmelCase__ ( self : Dict ):
if self.train_file is not None:
UpperCamelCase_: Union[str, Any] = self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
UpperCamelCase_: Dict = self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : PreTrainedTokenizerBase
__UpperCamelCase : Union[bool, str, PaddingStrategy] = True
__UpperCamelCase : Optional[int] = None
__UpperCamelCase : Optional[int] = None
def __call__( self : Optional[int] , snake_case_ : Dict ):
UpperCamelCase_: Dict = """label""" if """label""" in features[0].keys() else """labels"""
UpperCamelCase_: int = [feature.pop(snake_case_ ) for feature in features]
UpperCamelCase_: Optional[Any] = len(snake_case_ )
UpperCamelCase_: List[str] = len(features[0]["""input_ids"""] )
UpperCamelCase_: Tuple = [
[{k: v[i] for k, v in feature.items()} for i in range(snake_case_ )] for feature in features
]
UpperCamelCase_: Any = list(chain(*snake_case_ ) )
UpperCamelCase_: List[Any] = self.tokenizer.pad(
snake_case_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
# Un-flatten
UpperCamelCase_: Tuple = {k: v.view(snake_case_ , snake_case_ , -1 ) for k, v in batch.items()}
# Add back labels
UpperCamelCase_: Optional[int] = torch.tensor(snake_case_ , dtype=torch.intaa )
return batch
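# Shape note for the collator above: each feature holds `num_choices` variants
# of every field; they are flattened to (batch_size * num_choices) rows for
# `tokenizer.pad`, then reshaped back to (batch_size, num_choices, seq_len).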
def A__ ( ) -> Tuple:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase_: str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: List[str] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_swag""" , lowerCamelCase , lowerCamelCase )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase_: Dict = training_args.get_process_log_level()
logger.setLevel(lowerCamelCase )
datasets.utils.logging.set_verbosity(lowerCamelCase )
transformers.utils.logging.set_verbosity(lowerCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
        F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
UpperCamelCase_: List[str] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase_: List[str] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
UpperCamelCase_: List[str] = {}
if data_args.train_file is not None:
UpperCamelCase_: List[Any] = data_args.train_file
if data_args.validation_file is not None:
UpperCamelCase_: Optional[int] = data_args.validation_file
UpperCamelCase_: Any = data_args.train_file.split(""".""" )[-1]
UpperCamelCase_: Tuple = load_dataset(
lowerCamelCase , data_files=lowerCamelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
UpperCamelCase_: int = load_dataset(
"""swag""" , """regular""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase_: Optional[int] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase_: Union[str, Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase_: List[str] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
UpperCamelCase_: Union[str, Any] = [F'''ending{i}''' for i in range(4 )]
UpperCamelCase_: str = """sent1"""
UpperCamelCase_: List[str] = """sent2"""
if data_args.max_seq_length is None:
UpperCamelCase_: int = tokenizer.model_max_length
if max_seq_length > 10_24:
logger.warning(
"""The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"""
""" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"""
""" override this default with `--block_size xxx`.""" )
UpperCamelCase_: Optional[Any] = 10_24
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
UpperCamelCase_: Union[str, Any] = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(lowerCamelCase ):
UpperCamelCase_: Optional[Any] = [[context] * 4 for context in examples[context_name]]
UpperCamelCase_: Dict = examples[question_header_name]
UpperCamelCase_: List[str] = [
[F'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(lowerCamelCase )
]
# Flatten out
UpperCamelCase_: str = list(chain(*lowerCamelCase ) )
UpperCamelCase_: Any = list(chain(*lowerCamelCase ) )
# Tokenize
UpperCamelCase_: Any = tokenizer(
lowerCamelCase , lowerCamelCase , truncation=lowerCamelCase , max_length=lowerCamelCase , padding="""max_length""" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(lowerCamelCase ) , 4 )] for k, v in tokenized_examples.items()}
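    # (each example expands into four (context, ending) pairs; regrouping the
    # flat tokenizer output in chunks of 4 restores one row per example)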
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
UpperCamelCase_: str = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
UpperCamelCase_: Union[str, Any] = min(len(lowerCamelCase ) , data_args.max_train_samples )
UpperCamelCase_: Optional[int] = train_dataset.select(range(lowerCamelCase ) )
with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
UpperCamelCase_: str = train_dataset.map(
lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
UpperCamelCase_: Dict = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
UpperCamelCase_: str = min(len(lowerCamelCase ) , data_args.max_eval_samples )
UpperCamelCase_: Tuple = eval_dataset.select(range(lowerCamelCase ) )
with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
UpperCamelCase_: str = eval_dataset.map(
lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
UpperCamelCase_: str = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=lowerCamelCase , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(lowerCamelCase ):
UpperCamelCase_, UpperCamelCase_: List[str] = eval_predictions
UpperCamelCase_: Optional[Any] = np.argmax(lowerCamelCase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
UpperCamelCase_: Union[str, Any] = Trainer(
model=lowerCamelCase , args=lowerCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowerCamelCase , data_collator=lowerCamelCase , compute_metrics=lowerCamelCase , )
# Training
if training_args.do_train:
UpperCamelCase_: List[Any] = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase_: int = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase_: str = last_checkpoint
UpperCamelCase_: Optional[Any] = trainer.train(resume_from_checkpoint=lowerCamelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
UpperCamelCase_: Tuple = train_result.metrics
UpperCamelCase_: Tuple = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase )
)
UpperCamelCase_: Optional[Any] = min(lowerCamelCase , len(lowerCamelCase ) )
trainer.log_metrics("""train""" , lowerCamelCase )
trainer.save_metrics("""train""" , lowerCamelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
UpperCamelCase_: Optional[Any] = trainer.evaluate()
UpperCamelCase_: Tuple = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase )
UpperCamelCase_: Optional[Any] = min(lowerCamelCase , len(lowerCamelCase ) )
trainer.log_metrics("""eval""" , lowerCamelCase )
trainer.save_metrics("""eval""" , lowerCamelCase )
UpperCamelCase_: Optional[int] = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """multiple-choice""",
"""dataset_tags""": """swag""",
"""dataset_args""": """regular""",
"""dataset""": """SWAG""",
"""language""": """en""",
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCamelCase )
else:
trainer.create_model_card(**lowerCamelCase )
def A__ ( lowerCamelCase ) -> Tuple:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
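# Example invocation (model name and paths are illustrative):
#   python run_swag.py --model_name_or_path bert-base-uncased \
#       --do_train --do_eval --output_dir ./swag-output \
#       --per_device_train_batch_size 16 --learning_rate 5e-5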
| 670 | 1 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : Dict = LEDConfig
__UpperCamelCase : Optional[int] = {}
__UpperCamelCase : List[str] = """gelu"""
def __init__( self : int , snake_case_ : Optional[int] , snake_case_ : Optional[int]=13 , snake_case_ : List[str]=7 , snake_case_ : Any=True , snake_case_ : List[str]=False , snake_case_ : Dict=99 , snake_case_ : Optional[int]=32 , snake_case_ : str=2 , snake_case_ : List[Any]=4 , snake_case_ : str=37 , snake_case_ : Optional[Any]=0.1 , snake_case_ : str=0.1 , snake_case_ : List[Any]=20 , snake_case_ : Tuple=2 , snake_case_ : int=1 , snake_case_ : Union[str, Any]=0 , snake_case_ : List[str]=4 , ):
UpperCamelCase_: str = parent
UpperCamelCase_: Optional[Any] = batch_size
UpperCamelCase_: Dict = seq_length
UpperCamelCase_: List[Any] = is_training
UpperCamelCase_: List[str] = use_labels
UpperCamelCase_: Tuple = vocab_size
UpperCamelCase_: str = hidden_size
UpperCamelCase_: Tuple = num_hidden_layers
UpperCamelCase_: Dict = num_attention_heads
UpperCamelCase_: Dict = intermediate_size
UpperCamelCase_: Dict = hidden_dropout_prob
UpperCamelCase_: Dict = attention_probs_dropout_prob
UpperCamelCase_: Dict = max_position_embeddings
UpperCamelCase_: Union[str, Any] = eos_token_id
UpperCamelCase_: Union[str, Any] = pad_token_id
UpperCamelCase_: int = bos_token_id
UpperCamelCase_: Union[str, Any] = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
UpperCamelCase_: Optional[int] = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
UpperCamelCase_: Dict = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: List[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCamelCase_: str = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCamelCase_: List[str] = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCamelCase_: List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_: str = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
UpperCamelCase_: List[str] = prepare_led_inputs_dict(snake_case_ , snake_case_ , snake_case_ )
UpperCamelCase_: List[str] = tf.concat(
[tf.zeros_like(snake_case_ )[:, :-1], tf.ones_like(snake_case_ )[:, -1:]] , axis=-1 , )
UpperCamelCase_: Optional[Any] = global_attention_mask
return config, inputs_dict
def lowerCAmelCase__ ( self : Dict , snake_case_ : List[str] , snake_case_ : int ):
UpperCamelCase_: str = TFLEDModel(config=snake_case_ ).get_decoder()
UpperCamelCase_: str = inputs_dict["""input_ids"""]
UpperCamelCase_: Tuple = input_ids[:1, :]
UpperCamelCase_: Optional[Any] = inputs_dict["""attention_mask"""][:1, :]
UpperCamelCase_: Dict = 1
# first forward pass
UpperCamelCase_: List[str] = model(snake_case_ , attention_mask=snake_case_ , use_cache=snake_case_ )
UpperCamelCase_, UpperCamelCase_: str = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCamelCase_: Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase_: Tuple = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
UpperCamelCase_: Union[str, Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
UpperCamelCase_: int = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
UpperCamelCase_: int = model(snake_case_ , attention_mask=snake_case_ )[0]
UpperCamelCase_: List[Any] = model(snake_case_ , attention_mask=snake_case_ , past_key_values=snake_case_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
UpperCamelCase_: List[str] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
UpperCamelCase_: int = output_from_no_past[:, -3:, random_slice_idx]
UpperCamelCase_: Union[str, Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(snake_case_ , snake_case_ , rtol=1e-3 )
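        # matching sliced logits confirm that incremental decoding with cached
        # past_key_values reproduces a full forward pass over the same tokens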
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , ) -> List[Any]:
if attention_mask is None:
UpperCamelCase_: Any = tf.cast(tf.math.not_equal(lowerCamelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
UpperCamelCase_: Optional[Any] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
UpperCamelCase_: Union[str, Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCamelCase_: Dict = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class _UpperCamelCase ( _A , _A , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Any = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
__UpperCamelCase : Optional[Any] = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
__UpperCamelCase : str = (
{
"""conversational""": TFLEDForConditionalGeneration,
"""feature-extraction""": TFLEDModel,
"""summarization""": TFLEDForConditionalGeneration,
"""text2text-generation""": TFLEDForConditionalGeneration,
"""translation""": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
__UpperCamelCase : Any = True
__UpperCamelCase : List[str] = False
__UpperCamelCase : str = False
__UpperCamelCase : int = False
def lowerCAmelCase__ ( self : List[Any] ):
UpperCamelCase_: List[str] = TFLEDModelTester(self )
UpperCamelCase_: Tuple = ConfigTester(self , config_class=snake_case_ )
def lowerCAmelCase__ ( self : Tuple ):
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*snake_case_ )
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_, UpperCamelCase_: int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_: Tuple = tf.zeros_like(inputs_dict["""attention_mask"""] )
UpperCamelCase_: int = 2
UpperCamelCase_: List[Any] = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["""global_attention_mask"""] , )
UpperCamelCase_: Any = True
UpperCamelCase_: Tuple = self.model_tester.seq_length
UpperCamelCase_: Any = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(snake_case_ : List[Any] ):
UpperCamelCase_: List[str] = outputs.decoder_attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(snake_case_ : Dict ):
UpperCamelCase_: Dict = [t.numpy() for t in outputs.encoder_attentions]
UpperCamelCase_: Tuple = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
UpperCamelCase_: Dict = True
UpperCamelCase_: Dict = False
UpperCamelCase_: Tuple = False
UpperCamelCase_: List[str] = model_class(snake_case_ )
UpperCamelCase_: List[Any] = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
UpperCamelCase_: str = len(snake_case_ )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
if self.is_encoder_decoder:
UpperCamelCase_: List[Any] = model_class(snake_case_ )
UpperCamelCase_: List[str] = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_decoder_attentions_output(snake_case_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
UpperCamelCase_: List[Any] = True
UpperCamelCase_: Dict = model_class(snake_case_ )
UpperCamelCase_: List[str] = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
# Check attention is always last and order is fine
UpperCamelCase_: str = True
UpperCamelCase_: Tuple = True
UpperCamelCase_: Any = model_class(snake_case_ )
UpperCamelCase_: Dict = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(snake_case_ ) )
self.assertEqual(model.config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
@unittest.skip("""LED keeps using potentially symbolic tensors in conditionals and breaks tracing.""" )
def lowerCAmelCase__ ( self : Tuple ):
pass
def lowerCAmelCase__ ( self : List[Any] ):
# TODO: Head-masking not yet implemented
pass
def A__ ( lowerCamelCase ) -> List[Any]:
return tf.constant(lowerCamelCase , dtype=tf.intaa )
lowerCamelCase_ : Dict = 1E-4
@slow
@require_tf
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: Union[str, Any] = TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" ).led
# change to intended input here
UpperCamelCase_: Dict = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
UpperCamelCase_: int = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
UpperCamelCase_: Dict = prepare_led_inputs_dict(model.config , snake_case_ , snake_case_ )
UpperCamelCase_: Optional[Any] = model(**snake_case_ )[0]
UpperCamelCase_: List[str] = (1, 1024, 768)
self.assertEqual(output.shape , snake_case_ )
# change to expected output here
UpperCamelCase_: List[str] = tf.convert_to_tensor(
[[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , )
tf.debugging.assert_near(output[:, :3, :3] , snake_case_ , atol=1e-3 )
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: Union[str, Any] = TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" )
# change to intended input here
UpperCamelCase_: Union[str, Any] = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
UpperCamelCase_: Optional[int] = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
UpperCamelCase_: str = prepare_led_inputs_dict(model.config , snake_case_ , snake_case_ )
UpperCamelCase_: Optional[int] = model(**snake_case_ )[0]
UpperCamelCase_: Optional[Any] = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , snake_case_ )
# change to expected output here
UpperCamelCase_: int = tf.convert_to_tensor(
[[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , )
tf.debugging.assert_near(output[:, :3, :3] , snake_case_ , atol=1e-3 , rtol=1e-3 )
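# --- Added illustration (not from the original suite) ---
# Both integration tests above compare a 3x3 corner of the model output
# against hard-coded reference values with tf.debugging.assert_near. A
# minimal, self-contained analogue of that check:
def _demo_assert_near():
    import tensorflow as tf

    a = tf.constant([[1.0000, 2.0000], [3.0000, 4.0000]])
    b = tf.constant([[1.0005, 1.9995], [3.0005, 3.9995]])
    tf.debugging.assert_near(a, b, atol=1e-3)  # passes: max |a - b| <= 1e-3
    return True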
| 670 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase_ : Union[str, Any] = logging.getLogger()
lowerCamelCase_ : List[str] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _UpperCamelCase ( _A ):
'''simple docstring'''
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : Dict ):
os.makedirs(snake_case_ , exist_ok=snake_case_ )
UpperCamelCase_: int = {"""source""": """What is love ?""", """target""": """life"""}
UpperCamelCase_: Tuple = {"""train""": 12, """val""": 2, """test""": 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
UpperCamelCase_: Tuple = """\n""".join([contents[field]] * n_lines[split] )
with open(os.path.join(snake_case_ , f'''{split}.{field}''' ) , """w""" ) as f:
f.write(snake_case_ )
def lowerCAmelCase__ ( self : Dict , snake_case_ : int , snake_case_ : str = "pytorch" ):
UpperCamelCase_: Optional[Any] = self.get_auto_remove_tmp_dir()
UpperCamelCase_: Dict = os.path.join(snake_case_ , """output""" )
UpperCamelCase_: Any = os.path.join(snake_case_ , """data""" )
self._create_dummy_data(data_dir=snake_case_ )
UpperCamelCase_: Union[str, Any] = f'''
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
'''.split()
if gpus > 0:
testargs.append(f'''--gpus={gpus}''' )
if is_apex_available():
testargs.append("""--fp16""" )
else:
testargs.append("""--gpus=0""" )
testargs.append("""--distributed_backend=ddp_cpu""" )
testargs.append("""--num_processes=2""" )
UpperCamelCase_: Optional[Any] = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(snake_case_ , env=self.get_env() )
UpperCamelCase_: Optional[int] = os.path.join(snake_case_ , """metrics.json""" )
with open(snake_case_ ) as f:
UpperCamelCase_: Any = json.load(snake_case_ )
return result
@require_torch_gpu
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: List[str] = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: Any = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_gpu
@require_ray
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: List[Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
@require_ray
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: List[Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
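# --- Added hedged sketch (metric values fabricated for the demo) ---
# Mirrors how the harness above reads metrics.json after fine-tuning and
# asserts on the exact-match score.
def _demo_metrics_roundtrip(tmp_dir):
    import json
    import os

    path = os.path.join(tmp_dir, "metrics.json")
    with open(path, "w") as f:
        json.dump({"test": [{"test_avg_em": 0.25}]}, f)
    with open(path) as f:
        result = json.load(f)
    assert result["test"][0]["test_avg_em"] >= 0.2
    return result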
| 670 | 1 |
import tensorflow as tf
from ...tf_utils import shape_list
class _UpperCamelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Any , snake_case_ : int , snake_case_ : Any , snake_case_ : Dict , snake_case_ : Any , snake_case_ : int=1 , snake_case_ : Dict=False , **snake_case_ : str ):
super().__init__(**snake_case_ )
UpperCamelCase_: int = vocab_size
UpperCamelCase_: List[Any] = d_embed
UpperCamelCase_: Any = d_proj
UpperCamelCase_: Any = cutoffs + [vocab_size]
UpperCamelCase_: Any = [0] + self.cutoffs
UpperCamelCase_: List[Any] = div_val
UpperCamelCase_: Tuple = self.cutoffs[0]
UpperCamelCase_: Tuple = len(self.cutoffs ) - 1
UpperCamelCase_: Dict = self.shortlist_size + self.n_clusters
UpperCamelCase_: int = keep_order
UpperCamelCase_: Dict = []
UpperCamelCase_: Tuple = []
def lowerCAmelCase__ ( self : int , snake_case_ : Optional[Any] ):
if self.n_clusters > 0:
UpperCamelCase_: Dict = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer="""zeros""" , trainable=snake_case_ , name="""cluster_weight""" )
UpperCamelCase_: Union[str, Any] = self.add_weight(
shape=(self.n_clusters,) , initializer="""zeros""" , trainable=snake_case_ , name="""cluster_bias""" )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
UpperCamelCase_: Optional[int] = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer="""zeros""" , trainable=snake_case_ , name=f'''out_projs_._{i}''' , )
self.out_projs.append(snake_case_ )
else:
self.out_projs.append(snake_case_ )
UpperCamelCase_: Tuple = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer="""zeros""" , trainable=snake_case_ , name=f'''out_layers_._{i}_._weight''' , )
UpperCamelCase_: int = self.add_weight(
shape=(self.vocab_size,) , initializer="""zeros""" , trainable=snake_case_ , name=f'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
UpperCamelCase_, UpperCamelCase_: List[str] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCamelCase_: str = self.d_embed // (self.div_val**i)
UpperCamelCase_: str = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer="""zeros""" , trainable=snake_case_ , name=f'''out_projs_._{i}''' )
self.out_projs.append(snake_case_ )
UpperCamelCase_: List[str] = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer="""zeros""" , trainable=snake_case_ , name=f'''out_layers_._{i}_._weight''' , )
UpperCamelCase_: str = self.add_weight(
shape=(r_idx - l_idx,) , initializer="""zeros""" , trainable=snake_case_ , name=f'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
super().build(snake_case_ )
@staticmethod
def lowerCAmelCase__ ( snake_case_ : Tuple , snake_case_ : Dict , snake_case_ : Union[str, Any] , snake_case_ : str=None ):
UpperCamelCase_: Optional[int] = x
if proj is not None:
UpperCamelCase_: Optional[Any] = tf.einsum("""ibd,ed->ibe""" , snake_case_ , snake_case_ )
return tf.einsum("""ibd,nd->ibn""" , snake_case_ , snake_case_ ) + b
@staticmethod
def lowerCAmelCase__ ( snake_case_ : Any , snake_case_ : List[str] ):
UpperCamelCase_: str = shape_list(snake_case_ )
UpperCamelCase_: Optional[int] = tf.range(lp_size[0] , dtype=target.dtype )
UpperCamelCase_: Dict = tf.stack([r, target] , 1 )
return tf.gather_nd(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : List[str] , snake_case_ : List[Any] , snake_case_ : Any , snake_case_ : List[str]=True , snake_case_ : List[Any]=False ):
UpperCamelCase_: List[str] = 0
if self.n_clusters == 0:
UpperCamelCase_: Optional[int] = self._logit(snake_case_ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
UpperCamelCase_: List[Any] = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=snake_case_ , logits=snake_case_ )
UpperCamelCase_: str = tf.nn.log_softmax(snake_case_ , axis=-1 )
else:
UpperCamelCase_: Dict = shape_list(snake_case_ )
UpperCamelCase_: str = []
UpperCamelCase_: Union[str, Any] = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
UpperCamelCase_, UpperCamelCase_: List[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
UpperCamelCase_: Union[str, Any] = (target >= l_idx) & (target < r_idx)
UpperCamelCase_: Dict = tf.where(snake_case_ )
UpperCamelCase_: Dict = tf.boolean_mask(snake_case_ , snake_case_ ) - l_idx
if self.div_val == 1:
UpperCamelCase_: str = self.out_layers[0][0][l_idx:r_idx]
UpperCamelCase_: Optional[Any] = self.out_layers[0][1][l_idx:r_idx]
else:
UpperCamelCase_: Union[str, Any] = self.out_layers[i][0]
UpperCamelCase_: Dict = self.out_layers[i][1]
if i == 0:
UpperCamelCase_: Tuple = tf.concat([cur_W, self.cluster_weight] , 0 )
UpperCamelCase_: int = tf.concat([cur_b, self.cluster_bias] , 0 )
UpperCamelCase_: str = self._logit(snake_case_ , snake_case_ , snake_case_ , self.out_projs[0] )
UpperCamelCase_: List[Any] = tf.nn.log_softmax(snake_case_ )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
UpperCamelCase_: Union[str, Any] = tf.boolean_mask(snake_case_ , snake_case_ )
UpperCamelCase_: Optional[Any] = self._gather_logprob(snake_case_ , snake_case_ )
else:
UpperCamelCase_: Dict = self._logit(snake_case_ , snake_case_ , snake_case_ , self.out_projs[i] )
UpperCamelCase_: Tuple = tf.nn.log_softmax(snake_case_ )
UpperCamelCase_: Tuple = self.cutoffs[0] + i - 1 # No probability for the head cluster
UpperCamelCase_: int = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(snake_case_ )
if target is not None:
UpperCamelCase_: Optional[int] = tf.boolean_mask(snake_case_ , snake_case_ )
UpperCamelCase_: Dict = tf.boolean_mask(snake_case_ , snake_case_ )
UpperCamelCase_: Optional[int] = self._gather_logprob(snake_case_ , snake_case_ )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(snake_case_ , -cur_logprob , shape_list(snake_case_ ) )
UpperCamelCase_: Optional[int] = tf.concat(snake_case_ , axis=-1 )
if target is not None:
if return_mean:
UpperCamelCase_: Optional[Any] = tf.reduce_mean(snake_case_ )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(snake_case_ )
# Log the loss as a metric (we could log arbitrary metrics,
# including different metrics for training and inference).
self.add_metric(snake_case_ , name=self.name , aggregation="""mean""" if return_mean else """""" )
return out
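# --- Added illustrative sketch (sizes are made up) ---
# The layer above is an adaptive softmax. For a token in a tail cluster the
# log-probability factorises as
#     log p(token) = log p(cluster | h) + log p(token | cluster, h),
# which is the `head_logprob[..., cluster_idx] + tail_logprob` sum in the
# call above. A minimal NumPy analogue:
def _demo_adaptive_logprob():
    import numpy as np

    rng = np.random.default_rng(0)
    head_logits = rng.normal(size=5)   # 4 shortlist tokens + 1 cluster logit
    tail_logits = rng.normal(size=6)   # 6 tokens inside the tail cluster
    head_logprob = head_logits - np.log(np.exp(head_logits).sum())
    tail_logprob = tail_logits - np.log(np.exp(tail_logits).sum())
    full_tail = head_logprob[4] + tail_logprob
    # shortlist probabilities plus the expanded cluster still sum to one
    total = np.exp(head_logprob[:4]).sum() + np.exp(full_tail).sum()
    assert np.isclose(total, 1.0)
    return full_tail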
| 670 |
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : List[str] , snake_case_ : int , snake_case_ : Optional[Any]=None , snake_case_ : List[str]=None ):
UpperCamelCase_: List[Any] = data
UpperCamelCase_: List[Any] = previous
UpperCamelCase_: Tuple = next_node
def __str__( self : Dict ):
return f'''{self.data}'''
def lowerCAmelCase__ ( self : List[str] ):
return self.data
def lowerCAmelCase__ ( self : Any ):
return self.next
def lowerCAmelCase__ ( self : List[str] ):
return self.previous
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , snake_case_ : int ):
UpperCamelCase_: Union[str, Any] = head
def __iter__( self : Union[str, Any] ):
return self
def lowerCAmelCase__ ( self : Union[str, Any] ):
if not self.current:
raise StopIteration
else:
UpperCamelCase_: Dict = self.current.get_data()
UpperCamelCase_: Tuple = self.current.get_next()
return value
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : int ):
UpperCamelCase_: Optional[int] = None # First node in list
UpperCamelCase_: Dict = None # Last node in list
def __str__( self : Tuple ):
UpperCamelCase_: int = self.head
UpperCamelCase_: Tuple = []
while current is not None:
nodes.append(current.get_data() )
UpperCamelCase_: List[str] = current.get_next()
return " ".join(str(snake_case_ ) for node in nodes )
def __contains__( self : int , snake_case_ : int ):
UpperCamelCase_: Optional[Any] = self.head
while current:
if current.get_data() == value:
return True
UpperCamelCase_: Any = current.get_next()
return False
def __iter__( self : Any ):
return LinkedListIterator(self.head )
def lowerCAmelCase__ ( self : Tuple ):
if self.head:
return self.head.get_data()
return None
def lowerCAmelCase__ ( self : Optional[Any] ):
if self.tail:
return self.tail.get_data()
return None
def lowerCAmelCase__ ( self : Optional[int] , snake_case_ : Node ):
if self.head is None:
UpperCamelCase_: Tuple = node
UpperCamelCase_: Optional[int] = node
else:
self.insert_before_node(self.head , snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : Node ):
if self.head is None:
self.set_head(snake_case_ )
else:
self.insert_after_node(self.tail , snake_case_ )
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : int ):
UpperCamelCase_: Any = Node(snake_case_ )
if self.head is None:
self.set_head(snake_case_ )
else:
self.set_tail(snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : Node , snake_case_ : Node ):
UpperCamelCase_: str = node
UpperCamelCase_: int = node.previous
if node.get_previous() is None:
UpperCamelCase_: int = node_to_insert
else:
UpperCamelCase_: Dict = node_to_insert
UpperCamelCase_: int = node_to_insert
def lowerCAmelCase__ ( self : Dict , snake_case_ : Node , snake_case_ : Node ):
UpperCamelCase_: Tuple = node
UpperCamelCase_: Dict = node.next
if node.get_next() is None:
UpperCamelCase_: Union[str, Any] = node_to_insert
else:
UpperCamelCase_: str = node_to_insert
UpperCamelCase_: int = node_to_insert
def lowerCAmelCase__ ( self : Tuple , snake_case_ : int , snake_case_ : int ):
UpperCamelCase_: Union[str, Any] = 1
UpperCamelCase_: List[str] = Node(snake_case_ )
UpperCamelCase_: Optional[Any] = self.head
while node:
if current_position == position:
self.insert_before_node(snake_case_ , snake_case_ )
return
current_position += 1
UpperCamelCase_: Dict = node.next
self.insert_after_node(self.tail , snake_case_ )
def lowerCAmelCase__ ( self : int , snake_case_ : int ):
UpperCamelCase_: Union[str, Any] = self.head
while node:
if node.get_data() == item:
return node
UpperCamelCase_: List[Any] = node.get_next()
raise Exception("""Node not found""" )
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : List[str] ):
if (node := self.get_node(snake_case_ )) is not None:
if node == self.head:
UpperCamelCase_: Optional[int] = self.head.get_next()
if node == self.tail:
UpperCamelCase_: Union[str, Any] = self.tail.get_previous()
self.remove_node_pointers(snake_case_ )
@staticmethod
def lowerCAmelCase__ ( snake_case_ : Node ):
if node.get_next():
UpperCamelCase_: str = node.previous
if node.get_previous():
UpperCamelCase_: int = node.next
UpperCamelCase_: List[str] = None
UpperCamelCase_: int = None
def lowerCAmelCase__ ( self : str ):
return self.head is None
def A__ ( ) -> None:
pass
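# --- Added self-contained sketch ---
# The class above is a doubly linked list, but the style pass gave all of
# its methods the same name, so this demo re-implements the core
# append-and-traverse behaviour it describes directly.
def _demo_doubly_linked():
    class _Node:
        def __init__(self, data):
            self.data, self.prev, self.next = data, None, None

    head = tail = None
    for value in (1, 2, 3):
        node = _Node(value)
        if head is None:
            head = tail = node
        else:
            node.prev, tail.next = tail, node
            tail = node
    out, cur = [], head
    while cur:
        out.append(cur.data)
        cur = cur.next
    assert out == [1, 2, 3]
    return out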
if __name__ == "__main__":
import doctest
doctest.testmod()
| 670 | 1 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
lowerCamelCase_ : List[Any] = random.Random()
def A__ ( lowerCamelCase , lowerCamelCase=1.0 , lowerCamelCase=None , lowerCamelCase=None ) -> int:
if rng is None:
UpperCamelCase_: int = global_rng
UpperCamelCase_: Tuple = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : int , snake_case_ : Dict , snake_case_ : int=7 , snake_case_ : Any=400 , snake_case_ : Any=2000 , snake_case_ : Any=10 , snake_case_ : List[Any]=160 , snake_case_ : int=8 , snake_case_ : Optional[int]=0.0 , snake_case_ : Optional[Any]=4000 , snake_case_ : str=False , snake_case_ : int=True , ):
UpperCamelCase_: Tuple = parent
UpperCamelCase_: List[Any] = batch_size
UpperCamelCase_: Optional[int] = min_seq_length
UpperCamelCase_: List[Any] = max_seq_length
UpperCamelCase_: Any = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCamelCase_: int = padding_value
UpperCamelCase_: Optional[Any] = sampling_rate
UpperCamelCase_: List[Any] = return_attention_mask
UpperCamelCase_: Any = do_normalize
UpperCamelCase_: str = feature_size
UpperCamelCase_: List[str] = chunk_length
UpperCamelCase_: int = hop_length
def lowerCAmelCase__ ( self : Union[str, Any] ):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowerCAmelCase__ ( self : str , snake_case_ : Union[str, Any]=False , snake_case_ : Tuple=False ):
def _flatten(snake_case_ : Union[str, Any] ):
return list(itertools.chain(*snake_case_ ) )
if equal_length:
UpperCamelCase_: Any = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
UpperCamelCase_: Optional[Any] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
UpperCamelCase_: Tuple = [np.asarray(snake_case_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _UpperCamelCase ( _A , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : List[str] = WhisperFeatureExtractor if is_speech_available() else None
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: List[str] = WhisperFeatureExtractionTester(self )
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase_: Dict = feat_extract_first.save_pretrained(snake_case_ )[0]
check_json_file_has_correct_format(snake_case_ )
UpperCamelCase_: str = self.feature_extraction_class.from_pretrained(snake_case_ )
UpperCamelCase_: Union[str, Any] = feat_extract_first.to_dict()
UpperCamelCase_: Optional[Any] = feat_extract_second.to_dict()
UpperCamelCase_: Tuple = feat_extract_first.mel_filters
UpperCamelCase_: Any = feat_extract_second.mel_filters
self.assertTrue(np.allclose(snake_case_ , snake_case_ ) )
self.assertEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase_: Optional[int] = os.path.join(snake_case_ , """feat_extract.json""" )
feat_extract_first.to_json_file(snake_case_ )
UpperCamelCase_: int = self.feature_extraction_class.from_json_file(snake_case_ )
UpperCamelCase_: int = feat_extract_first.to_dict()
UpperCamelCase_: Any = feat_extract_second.to_dict()
UpperCamelCase_: Union[str, Any] = feat_extract_first.mel_filters
UpperCamelCase_: Tuple = feat_extract_second.mel_filters
self.assertTrue(np.allclose(snake_case_ , snake_case_ ) )
self.assertEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : str ):
# Tests that all calls wrap to encode_plus and batch_encode_plus
UpperCamelCase_: Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase_: Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
UpperCamelCase_: Dict = [np.asarray(snake_case_ ) for speech_input in speech_inputs]
# Test feature size
UpperCamelCase_: List[str] = feature_extractor(snake_case_ , padding="""max_length""" , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
UpperCamelCase_: Dict = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
UpperCamelCase_: Optional[int] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(snake_case_ , snake_case_ , atol=1e-3 ) )
# Test batched
UpperCamelCase_: Union[str, Any] = feature_extractor(snake_case_ , return_tensors="""np""" ).input_features
UpperCamelCase_: Tuple = feature_extractor(snake_case_ , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(snake_case_ , snake_case_ ):
self.assertTrue(np.allclose(snake_case_ , snake_case_ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCamelCase_: List[Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCamelCase_: Union[str, Any] = np.asarray(snake_case_ )
UpperCamelCase_: Optional[int] = feature_extractor(snake_case_ , return_tensors="""np""" ).input_features
UpperCamelCase_: int = feature_extractor(snake_case_ , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(snake_case_ , snake_case_ ):
self.assertTrue(np.allclose(snake_case_ , snake_case_ , atol=1e-3 ) )
# Test truncation required
UpperCamelCase_: int = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
UpperCamelCase_: Optional[int] = [np.asarray(snake_case_ ) for speech_input in speech_inputs]
UpperCamelCase_: Union[str, Any] = [x[: feature_extractor.n_samples] for x in speech_inputs]
UpperCamelCase_: Optional[Any] = [np.asarray(snake_case_ ) for speech_input in speech_inputs_truncated]
UpperCamelCase_: Optional[int] = feature_extractor(snake_case_ , return_tensors="""np""" ).input_features
UpperCamelCase_: str = feature_extractor(snake_case_ , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(snake_case_ , snake_case_ ):
self.assertTrue(np.allclose(snake_case_ , snake_case_ , atol=1e-3 ) )
def lowerCAmelCase__ ( self : int ):
import torch
UpperCamelCase_: List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase_: Any = np.random.rand(100 , 32 ).astype(np.floataa )
UpperCamelCase_: Union[str, Any] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCamelCase_: List[Any] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
UpperCamelCase_: Any = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def lowerCAmelCase__ ( self : Union[str, Any] , snake_case_ : str ):
UpperCamelCase_: Tuple = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
UpperCamelCase_: Tuple = ds.sort("""id""" ).select(range(snake_case_ ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def lowerCAmelCase__ ( self : Tuple ):
# fmt: off
UpperCamelCase_: List[Any] = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
UpperCamelCase_: int = self._load_datasamples(1 )
UpperCamelCase_: Union[str, Any] = WhisperFeatureExtractor()
UpperCamelCase_: List[Any] = feature_extractor(snake_case_ , return_tensors="""pt""" ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , snake_case_ , atol=1e-4 ) )
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase_: Tuple = self._load_datasamples(1 )[0]
UpperCamelCase_: Optional[int] = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535 # Rescale to [0, 65535] to show issue
UpperCamelCase_: Tuple = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=snake_case_ )[0]
self.assertTrue(np.all(np.mean(snake_case_ ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(snake_case_ ) - 1 ) < 1e-3 ) )
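# --- Added illustration ---
# The last test above exercises per-utterance zero-mean / unit-variance
# normalisation; its core transform is just the line below (the 1e-7
# epsilon for numerical stability is an assumption of this sketch).
def _demo_zero_mean_unit_var(audio):
    import numpy as np

    audio = np.asarray(audio, dtype=np.float32)
    return (audio - audio.mean()) / np.sqrt(audio.var() + 1e-7)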
| 670 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase_ : List[str] = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[Any] = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
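# --- Added illustration (hypothetical, minimal analogue) ---
# The `_LazyModule` pattern above defers heavy submodule imports until one
# of the exported names is first accessed:
def _demo_lazy_module():
    import importlib
    import types

    class _Lazy(types.ModuleType):
        def __init__(self, name, import_structure):
            super().__init__(name)
            self._import_structure = import_structure

        def __getattr__(self, attr):
            for module, names in self._import_structure.items():
                if attr in names:
                    return getattr(importlib.import_module(module), attr)
            raise AttributeError(attr)

    lazy = _Lazy("demo", {"json": ["dumps"]})
    return lazy.dumps({"ok": True})  # `json` is imported only on this access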
| 670 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase_ : List[str] = {
"""configuration_mobilenet_v2""": [
"""MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""MobileNetV2Config""",
"""MobileNetV2OnnxConfig""",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Tuple = ["""MobileNetV2FeatureExtractor"""]
lowerCamelCase_ : Union[str, Any] = ["""MobileNetV2ImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : int = [
"""MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileNetV2ForImageClassification""",
"""MobileNetV2ForSemanticSegmentation""",
"""MobileNetV2Model""",
"""MobileNetV2PreTrainedModel""",
"""load_tf_weights_in_mobilenet_v2""",
]
if TYPE_CHECKING:
from .configuration_mobilenet_va import (
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileNetVaConfig,
MobileNetVaOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor
from .image_processing_mobilenet_va import MobileNetVaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilenet_va import (
MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileNetVaForImageClassification,
MobileNetVaForSemanticSegmentation,
MobileNetVaModel,
MobileNetVaPreTrainedModel,
load_tf_weights_in_mobilenet_va,
)
else:
import sys
lowerCamelCase_ : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 670 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self : int ):
torch.manual_seed(0 )
UpperCamelCase_: Any = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
@property
def lowerCAmelCase__ ( self : Union[str, Any] ):
torch.manual_seed(0 )
UpperCamelCase_: Union[str, Any] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , )
return model
@property
def lowerCAmelCase__ ( self : Any ):
torch.manual_seed(0 )
UpperCamelCase_: List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(snake_case_ )
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: Union[str, Any] = self.dummy_uncond_unet
UpperCamelCase_: Optional[Any] = DDIMScheduler()
UpperCamelCase_: List[str] = self.dummy_vq_model
UpperCamelCase_: List[Any] = LDMPipeline(unet=snake_case_ , vqvae=snake_case_ , scheduler=snake_case_ )
ldm.to(snake_case_ )
ldm.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: str = torch.manual_seed(0 )
UpperCamelCase_: int = ldm(generator=snake_case_ , num_inference_steps=2 , output_type="""numpy""" ).images
UpperCamelCase_: Dict = torch.manual_seed(0 )
UpperCamelCase_: str = ldm(generator=snake_case_ , num_inference_steps=2 , output_type="""numpy""" , return_dict=snake_case_ )[0]
UpperCamelCase_: List[Any] = image[0, -3:, -3:, -1]
UpperCamelCase_: Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase_: str = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
UpperCamelCase_: Optional[Any] = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: Dict = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
ldm.to(snake_case_ )
ldm.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: List[str] = torch.manual_seed(0 )
UpperCamelCase_: Optional[int] = ldm(generator=snake_case_ , num_inference_steps=5 , output_type="""numpy""" ).images
UpperCamelCase_: List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCamelCase_: List[str] = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] )
UpperCamelCase_: Dict = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
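# --- Added illustration (values fabricated for the demo) ---
# Both tests above compare a 3x3 corner of the generated image against
# stored reference numbers. A self-contained analogue of that check:
def _demo_image_slice_check():
    import numpy as np

    image = np.zeros((1, 64, 64, 3), dtype=np.float32)
    image_slice = image[0, -3:, -3:, -1]      # bottom-right 3x3, last channel
    expected_slice = np.zeros(9, dtype=np.float32)
    assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    return image_slice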
| 670 | 1 |
def A__ ( lowerCamelCase ) -> Optional[Any]:
UpperCamelCase_: Optional[int] = len(lowerCamelCase )
UpperCamelCase_: Tuple = sum(lowerCamelCase )
UpperCamelCase_: str = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
UpperCamelCase_: Dict = True
for i in range(1 , s + 1 ):
UpperCamelCase_: Tuple = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
UpperCamelCase_: Optional[Any] = dp[i][j - 1]
if arr[i - 1] <= j:
UpperCamelCase_: List[Any] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
UpperCamelCase_: Dict = s - 2 * j
break
return diff
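# --- Added clean, runnable sketch ---
# The style pass above collapsed the `dp[...]` assignments, so this restates
# the subset-sum DP for the minimum partition difference with the standard
# recurrence dp[i][j] = dp[i-1][j] or dp[i-1][j - arr[i-1]].
def _min_partition_diff(arr):
    n, s = len(arr), sum(arr)
    dp = [[False] * (s + 1) for _ in range(n + 1)]
    for i in range(n + 1):
        dp[i][0] = True                        # the empty subset sums to 0
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]            # skip arr[i-1]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    for j in range(s // 2, -1, -1):            # largest achievable j <= s/2
        if dp[n][j]:
            return s - 2 * j


assert _min_partition_diff([1, 6, 11, 5]) == 1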
| 670 |
def A__ ( lowerCamelCase = 50 ) -> int:
UpperCamelCase_: List[Any] = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
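# --- Added cross-check (Project Euler 116-style count) ---
# For a single tile length k, the number of tilings of a length-n row
# (empty tiling included) satisfies t(n) = t(n-1) + t(n-k); the function
# above effectively sums t_k(length) - 1 over k in {2, 3, 4}, one tile
# colour per length. For length 5 this gives the known 7 + 3 + 2 split.
def _ways_at_least_one_tile(length: int, k: int) -> int:
    t = [1] * (length + 1)
    for n in range(k, length + 1):
        t[n] = t[n - 1] + t[n - k]
    return t[length] - 1


assert [_ways_at_least_one_tile(5, k) for k in (2, 3, 4)] == [7, 3, 2]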
if __name__ == "__main__":
print(F"""{solution() = }""")
| 670 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCamelCase_ : Any = {
"""configuration_bloom""": ["""BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BloomConfig""", """BloomOnnxConfig"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Union[str, Any] = ["""BloomTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[int] = [
"""BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BloomForCausalLM""",
"""BloomModel""",
"""BloomPreTrainedModel""",
"""BloomForSequenceClassification""",
"""BloomForTokenClassification""",
"""BloomForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 670 |
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Union[str, Any]:
# Initialise PyTorch model
UpperCamelCase_: List[Any] = TaConfig.from_json_file(lowerCamelCase )
print(F'''Building PyTorch model from configuration: {config}''' )
UpperCamelCase_: Any = TaForConditionalGeneration(lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_ta(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
lowerCamelCase_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCamelCase_ : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
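# --- Added usage example (file paths and the script name are placeholders) ---
#
#   python convert_t5_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/t5/model.ckpt \
#       --config_file /path/to/t5/config.json \
#       --pytorch_dump_path /path/to/pytorch_model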
| 670 | 1 |
from __future__ import annotations
import math
def A__ ( lowerCamelCase ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCamelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def A__ ( lowerCamelCase ) -> list[int]:
UpperCamelCase_: Optional[Any] = str(lowerCamelCase )
UpperCamelCase_: List[str] = [n]
for i in range(1 , len(lowerCamelCase ) ):
list_nums.append(int(str_num[i:] ) )
list_nums.append(int(str_num[:-i] ) )
return list_nums
def A__ ( lowerCamelCase ) -> bool:
if len(str(lowerCamelCase ) ) > 3:
if not is_prime(int(str(lowerCamelCase )[-3:] ) ) or not is_prime(int(str(lowerCamelCase )[:3] ) ):
return False
return True
def A__ ( lowerCamelCase = 11 ) -> list[int]:
UpperCamelCase_: list[int] = []
UpperCamelCase_: Dict = 13
while len(lowerCamelCase ) != count:
if validate(lowerCamelCase ):
UpperCamelCase_: Dict = list_truncated_nums(lowerCamelCase )
if all(is_prime(lowerCamelCase ) for i in list_nums ):
list_truncated_primes.append(lowerCamelCase )
num += 2
return list_truncated_primes
def A__ ( ) -> int:
return sum(compute_truncated_primes(11 ) )
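# --- Added worked example (self-contained, since the defs above were
# renamed by the style pass) ---
# 3797 stays prime under every left and right truncation, which is exactly
# the sequence that list_truncated_nums enumerates.
def _demo_truncations():
    s = "3797"
    parts = [3797]
    for i in range(1, len(s)):
        parts.append(int(s[i:]))
        parts.append(int(s[:-i]))
    assert parts == [3797, 797, 379, 97, 37, 7, 3]
    return parts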
if __name__ == "__main__":
print(F"""{sum(compute_truncated_primes(11)) = }""")
| 670 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ : str = {
"""configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
"""tokenization_roformer""": ["""RoFormerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Union[str, Any] = ["""RoFormerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Any = [
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Dict = [
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[Any] = [
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 670 | 1 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase_ : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase_ : Optional[Any] = """▁"""
lowerCamelCase_ : List[str] = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
lowerCamelCase_ : Optional[Any] = {
"""vocab_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json""",
},
"""spm_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model""",
},
"""tokenizer_config_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json""",
},
}
lowerCamelCase_ : Union[str, Any] = {
"""facebook/m2m100_418M""": 10_24,
}
# fmt: off
lowerCamelCase_ : str = {
"""m2m100""": ["""af""", """am""", """ar""", """ast""", """az""", """ba""", """be""", """bg""", """bn""", """br""", """bs""", """ca""", """ceb""", """cs""", """cy""", """da""", """de""", """el""", """en""", """es""", """et""", """fa""", """ff""", """fi""", """fr""", """fy""", """ga""", """gd""", """gl""", """gu""", """ha""", """he""", """hi""", """hr""", """ht""", """hu""", """hy""", """id""", """ig""", """ilo""", """is""", """it""", """ja""", """jv""", """ka""", """kk""", """km""", """kn""", """ko""", """lb""", """lg""", """ln""", """lo""", """lt""", """lv""", """mg""", """mk""", """ml""", """mn""", """mr""", """ms""", """my""", """ne""", """nl""", """no""", """ns""", """oc""", """or""", """pa""", """pl""", """ps""", """pt""", """ro""", """ru""", """sd""", """si""", """sk""", """sl""", """so""", """sq""", """sr""", """ss""", """su""", """sv""", """sw""", """ta""", """th""", """tl""", """tn""", """tr""", """uk""", """ur""", """uz""", """vi""", """wo""", """xh""", """yi""", """yo""", """zh""", """zu"""],
"""wmt21""": ["""en""", """ha""", """is""", """ja""", """cs""", """ru""", """zh""", """de"""]
}
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = VOCAB_FILES_NAMES
__UpperCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : Any = ["""input_ids""", """attention_mask"""]
__UpperCamelCase : List[int] = []
__UpperCamelCase : List[int] = []
def __init__( self : List[Any] , snake_case_ : List[str] , snake_case_ : Any , snake_case_ : List[Any]=None , snake_case_ : Union[str, Any]=None , snake_case_ : Optional[Any]="<s>" , snake_case_ : int="</s>" , snake_case_ : Tuple="</s>" , snake_case_ : Optional[int]="<pad>" , snake_case_ : Tuple="<unk>" , snake_case_ : Dict="m2m100" , snake_case_ : Optional[Dict[str, Any]] = None , snake_case_ : Tuple=8 , **snake_case_ : Optional[int] , ):
UpperCamelCase_: Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
UpperCamelCase_: str = language_codes
UpperCamelCase_: int = FAIRSEQ_LANGUAGE_CODES[language_codes]
UpperCamelCase_: Optional[int] = {lang_code: f'''__{lang_code}__''' for lang_code in fairseq_language_code}
UpperCamelCase_: Tuple = kwargs.get("""additional_special_tokens""" , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(snake_case_ )
for lang_code in fairseq_language_code
if self.get_lang_token(snake_case_ ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=snake_case_ , tgt_lang=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , sep_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , language_codes=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=snake_case_ , **snake_case_ , )
UpperCamelCase_: Any = vocab_file
UpperCamelCase_: Optional[Any] = load_json(snake_case_ )
UpperCamelCase_: List[str] = {v: k for k, v in self.encoder.items()}
UpperCamelCase_: Optional[Any] = spm_file
UpperCamelCase_: str = load_spm(snake_case_ , self.sp_model_kwargs )
UpperCamelCase_: Union[str, Any] = len(self.encoder )
UpperCamelCase_: Optional[int] = {
self.get_lang_token(snake_case_ ): self.encoder_size + i for i, lang_code in enumerate(snake_case_ )
}
UpperCamelCase_: Tuple = {lang_code: self.encoder_size + i for i, lang_code in enumerate(snake_case_ )}
UpperCamelCase_: Tuple = {v: k for k, v in self.lang_token_to_id.items()}
UpperCamelCase_: int = src_lang if src_lang is not None else """en"""
UpperCamelCase_: Optional[Any] = tgt_lang
UpperCamelCase_: str = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
UpperCamelCase_: Optional[Any] = num_madeup_words
@property
def lowerCAmelCase__ ( self : Union[str, Any] ):
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def lowerCAmelCase__ ( self : int ):
return self._src_lang
@src_lang.setter
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : str ):
UpperCamelCase_: List[Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowerCAmelCase__ ( self : Tuple , snake_case_ : str ):
return self.sp_model.encode(snake_case_ , out_type=snake_case_ )
def lowerCAmelCase__ ( self : Union[str, Any] , snake_case_ : Tuple ):
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(snake_case_ , self.encoder[self.unk_token] )
def lowerCAmelCase__ ( self : Any , snake_case_ : int ):
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(snake_case_ , self.unk_token )
def lowerCAmelCase__ ( self : Union[str, Any] , snake_case_ : Any ):
UpperCamelCase_: Any = []
UpperCamelCase_: Union[str, Any] = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(snake_case_ ) + token
UpperCamelCase_: Optional[int] = []
else:
current_sub_tokens.append(snake_case_ )
out_string += self.sp_model.decode(snake_case_ )
return out_string.strip()
def lowerCAmelCase__ ( self : Optional[int] , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None , snake_case_ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
UpperCamelCase_: Union[str, Any] = [1] * len(self.prefix_tokens )
UpperCamelCase_: str = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(snake_case_ )) + suffix_ones
return prefix_ones + ([0] * len(snake_case_ )) + ([0] * len(snake_case_ )) + suffix_ones
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: List[Any] = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Union[str, Any] ):
UpperCamelCase_: Any = self.__dict__.copy()
UpperCamelCase_: Any = None
return state
def __setstate__( self : Union[str, Any] , snake_case_ : Dict ):
UpperCamelCase_: List[Any] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
UpperCamelCase_: List[Any] = {}
UpperCamelCase_: Any = load_spm(self.spm_file , self.sp_model_kwargs )
def lowerCAmelCase__ ( self : int , snake_case_ : str , snake_case_ : Optional[str] = None ):
UpperCamelCase_: Tuple = Path(snake_case_ )
if not save_dir.is_dir():
raise OSError(f'''{save_directory} should be a directory''' )
UpperCamelCase_: str = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""]
)
UpperCamelCase_: Dict = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""]
)
save_json(self.encoder , snake_case_ )
if os.path.abspath(self.spm_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , snake_case_ )
elif not os.path.isfile(self.spm_file ):
with open(snake_case_ , """wb""" ) as fi:
UpperCamelCase_: Any = self.sp_model.serialized_model_proto()
fi.write(snake_case_ )
return (str(snake_case_ ), str(snake_case_ ))
def lowerCAmelCase__ ( self : Tuple , snake_case_ : List[str] , snake_case_ : str = "en" , snake_case_ : Optional[List[str]] = None , snake_case_ : str = "ro" , **snake_case_ : Dict , ):
UpperCamelCase_: Tuple = src_lang
UpperCamelCase_: str = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(snake_case_ , snake_case_ , **snake_case_ )
def lowerCAmelCase__ ( self : Any , snake_case_ : Dict , snake_case_ : Optional[str] , snake_case_ : Optional[str] , **snake_case_ : Union[str, Any] ):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
UpperCamelCase_: str = src_lang
UpperCamelCase_: Optional[Any] = self(snake_case_ , add_special_tokens=snake_case_ , **snake_case_ )
UpperCamelCase_: Tuple = self.get_lang_id(snake_case_ )
UpperCamelCase_: str = tgt_lang_id
return inputs
def lowerCAmelCase__ ( self : List[Any] ):
self.set_src_lang_special_tokens(self.src_lang )
def lowerCAmelCase__ ( self : Union[str, Any] ):
self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowerCAmelCase__ ( self : Dict , snake_case_ : str ):
UpperCamelCase_: int = self.get_lang_token(snake_case_ )
UpperCamelCase_: Dict = self.lang_token_to_id[lang_token]
UpperCamelCase_: List[str] = [self.cur_lang_id]
UpperCamelCase_: Dict = [self.eos_token_id]
def lowerCAmelCase__ ( self : Any , snake_case_ : str ):
UpperCamelCase_: List[str] = self.get_lang_token(snake_case_ )
UpperCamelCase_: Any = self.lang_token_to_id[lang_token]
UpperCamelCase_: Tuple = [self.cur_lang_id]
UpperCamelCase_: int = [self.eos_token_id]
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : str ):
return self.lang_code_to_token[lang]
def lowerCAmelCase__ ( self : Tuple , snake_case_ : str ):
UpperCamelCase_: Optional[int] = self.get_lang_token(snake_case_ )
return self.lang_token_to_id[lang_token]
def A__ ( lowerCamelCase , lowerCamelCase ) -> sentencepiece.SentencePieceProcessor:
UpperCamelCase_: Dict = sentencepiece.SentencePieceProcessor(**lowerCamelCase )
spm.Load(str(lowerCamelCase ) )
return spm
def A__ ( lowerCamelCase ) -> Union[Dict, List]:
with open(lowerCamelCase , """r""" ) as f:
return json.load(lowerCamelCase )
def A__ ( lowerCamelCase , lowerCamelCase ) -> None:
with open(lowerCamelCase , """w""" ) as f:
json.dump(lowerCamelCase , lowerCamelCase , indent=2 )
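# Hedged standalone sketch (payload and file name are illustrative): the
# save_json/load_json helpers above are a plain round trip through the json
# module, so saving and reloading must return the original mapping.
def _json_roundtrip_demo() -> None:
    import json
    import os
    import tempfile

    payload = {"<s>": 0, "<pad>": 1}
    path = os.path.join(tempfile.mkdtemp(), "vocab.json")
    with open(path, "w") as f:
        json.dump(payload, f, indent=2)
    with open(path, "r") as f:
        assert json.load(f) == payload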
| 670 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase = "x" , lowerCamelCase = 10**-10 , lowerCamelCase = 1 , ) -> complex:
UpperCamelCase_: Optional[Any] = symbols(lowerCamelCase )
UpperCamelCase_: int = lambdify(lowerCamelCase , lowerCamelCase )
UpperCamelCase_: Optional[Any] = lambdify(lowerCamelCase , diff(lowerCamelCase , lowerCamelCase ) )
UpperCamelCase_: Tuple = starting_point
while True:
if diff_function(lowerCamelCase ) != 0:
UpperCamelCase_: List[Any] = prev_guess - multiplicity * func(lowerCamelCase ) / diff_function(
lowerCamelCase )
else:
raise ZeroDivisionError("""Could not find root""" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
UpperCamelCase_: Any = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
    # Find a fourth root of 5 (the complex starting point converges to a complex root)
    print(F"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 - 5', 0.4 + 5J)}""")
# Find value of e
print(
"""The root of log(y) - 1 = 0 is """,
F"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
"""The root of exp(x) - 1 = 0 is""",
F"""{newton_raphson('exp(x) - 1', 10, precision=0.005)}""",
)
# Find root of cos(x)
print(F"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
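    # Hedged extra example (added for illustration): for a repeated root such as
    # (x - 1)**2 * (x + 2) = 0 at x = 1, passing multiplicity=2 keeps the fast
    # quadratic convergence that plain Newton-Raphson loses on multiple roots.
    print(
        """The double root of (x - 1)**2 * (x + 2) = 0 is""",
        F"""{newton_raphson('(x - 1)**2 * (x + 2)', 1.5, precision=1e-6, multiplicity=2)}""",
    )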
| 670 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _UpperCamelCase ( _A , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : List[Any] = UnCLIPImageVariationPipeline
__UpperCamelCase : str = IMAGE_VARIATION_PARAMS - {"""height""", """width""", """guidance_scale"""}
__UpperCamelCase : List[Any] = IMAGE_VARIATION_BATCH_PARAMS
__UpperCamelCase : Optional[Any] = [
"""generator""",
"""return_dict""",
"""decoder_num_inference_steps""",
"""super_res_num_inference_steps""",
]
__UpperCamelCase : Optional[Any] = False
@property
def lowerCAmelCase__ ( self : List[Any] ):
return 32
@property
def lowerCAmelCase__ ( self : int ):
return 32
@property
def lowerCAmelCase__ ( self : Tuple ):
return self.time_input_dim
@property
def lowerCAmelCase__ ( self : List[Any] ):
return self.time_input_dim * 4
@property
def lowerCAmelCase__ ( self : Union[str, Any] ):
return 100
@property
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: List[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def lowerCAmelCase__ ( self : Optional[Any] ):
torch.manual_seed(0 )
UpperCamelCase_: Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(snake_case_ )
@property
def lowerCAmelCase__ ( self : List[Any] ):
torch.manual_seed(0 )
UpperCamelCase_: Optional[int] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(snake_case_ )
@property
def lowerCAmelCase__ ( self : Optional[Any] ):
torch.manual_seed(0 )
UpperCamelCase_: int = {
"""clip_embeddings_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""cross_attention_dim""": self.cross_attention_dim,
}
UpperCamelCase_: int = UnCLIPTextProjModel(**snake_case_ )
return model
@property
def lowerCAmelCase__ ( self : Optional[int] ):
torch.manual_seed(0 )
UpperCamelCase_: List[Any] = {
"""sample_size""": 32,
# RGB in channels
"""in_channels""": 3,
            # Out channels is double the in channels because the model predicts both mean and variance
"""out_channels""": 6,
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": """identity""",
}
UpperCamelCase_: str = UNetaDConditionModel(**snake_case_ )
return model
@property
def lowerCAmelCase__ ( self : int ):
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def lowerCAmelCase__ ( self : int ):
torch.manual_seed(0 )
UpperCamelCase_: Dict = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def lowerCAmelCase__ ( self : List[Any] ):
# seeded differently to get different unet than `self.dummy_super_res_first`
torch.manual_seed(1 )
UpperCamelCase_: Dict = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: List[str] = self.dummy_decoder
UpperCamelCase_: str = self.dummy_text_proj
UpperCamelCase_: List[str] = self.dummy_text_encoder
UpperCamelCase_: int = self.dummy_tokenizer
UpperCamelCase_: Tuple = self.dummy_super_res_first
UpperCamelCase_: Dict = self.dummy_super_res_last
UpperCamelCase_: str = UnCLIPScheduler(
variance_type="""learned_range""" , prediction_type="""epsilon""" , num_train_timesteps=1000 , )
UpperCamelCase_: List[Any] = UnCLIPScheduler(
variance_type="""fixed_small_log""" , prediction_type="""epsilon""" , num_train_timesteps=1000 , )
UpperCamelCase_: List[str] = CLIPImageProcessor(crop_size=32 , size=32 )
UpperCamelCase_: Dict = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def lowerCAmelCase__ ( self : List[str] , snake_case_ : Any , snake_case_ : Optional[int]=0 , snake_case_ : Optional[Any]=True ):
UpperCamelCase_: int = floats_tensor((1, 3, 32, 32) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
if str(snake_case_ ).startswith("""mps""" ):
UpperCamelCase_: int = torch.manual_seed(snake_case_ )
else:
UpperCamelCase_: Union[str, Any] = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
if pil_image:
UpperCamelCase_: Optional[Any] = input_image * 0.5 + 0.5
UpperCamelCase_: Tuple = input_image.clamp(0 , 1 )
UpperCamelCase_: Tuple = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
UpperCamelCase_: Union[str, Any] = DiffusionPipeline.numpy_to_pil(snake_case_ )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: Optional[int] = """cpu"""
UpperCamelCase_: Optional[Any] = self.get_dummy_components()
UpperCamelCase_: List[Any] = self.pipeline_class(**snake_case_ )
UpperCamelCase_: List[Any] = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: int = self.get_dummy_inputs(snake_case_ , pil_image=snake_case_ )
UpperCamelCase_: Tuple = pipe(**snake_case_ )
UpperCamelCase_: List[Any] = output.images
UpperCamelCase_: str = self.get_dummy_inputs(snake_case_ , pil_image=snake_case_ )
UpperCamelCase_: Dict = pipe(
**snake_case_ , return_dict=snake_case_ , )[0]
UpperCamelCase_: str = image[0, -3:, -3:, -1]
UpperCamelCase_: Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase_: Union[str, Any] = np.array(
[
0.9997,
0.0002,
0.9997,
0.9997,
0.9969,
0.0023,
0.9997,
0.9969,
0.9970,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: Optional[int] = """cpu"""
UpperCamelCase_: List[str] = self.get_dummy_components()
UpperCamelCase_: Optional[Any] = self.pipeline_class(**snake_case_ )
UpperCamelCase_: Optional[int] = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: List[str] = self.get_dummy_inputs(snake_case_ , pil_image=snake_case_ )
UpperCamelCase_: Optional[int] = pipe(**snake_case_ )
UpperCamelCase_: int = output.images
UpperCamelCase_: Tuple = self.get_dummy_inputs(snake_case_ , pil_image=snake_case_ )
UpperCamelCase_: Optional[int] = pipe(
**snake_case_ , return_dict=snake_case_ , )[0]
UpperCamelCase_: int = image[0, -3:, -3:, -1]
UpperCamelCase_: Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase_: Optional[int] = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: List[Any] = """cpu"""
UpperCamelCase_: Tuple = self.get_dummy_components()
UpperCamelCase_: Dict = self.pipeline_class(**snake_case_ )
UpperCamelCase_: List[Any] = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: Union[str, Any] = self.get_dummy_inputs(snake_case_ , pil_image=snake_case_ )
UpperCamelCase_: Tuple = [
pipeline_inputs["""image"""],
pipeline_inputs["""image"""],
]
UpperCamelCase_: List[Any] = pipe(**snake_case_ )
UpperCamelCase_: Any = output.images
UpperCamelCase_: Optional[Any] = self.get_dummy_inputs(snake_case_ , pil_image=snake_case_ )
UpperCamelCase_: Optional[Any] = [
tuple_pipeline_inputs["""image"""],
tuple_pipeline_inputs["""image"""],
]
UpperCamelCase_: Optional[int] = pipe(
**snake_case_ , return_dict=snake_case_ , )[0]
UpperCamelCase_: Any = image[0, -3:, -3:, -1]
UpperCamelCase_: Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
UpperCamelCase_: List[str] = np.array(
[
0.9997,
0.9989,
0.0008,
0.0021,
0.9960,
0.0018,
0.0014,
0.0002,
0.9933,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: List[Any] = torch.device("""cpu""" )
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : Optional[Any] = 1
UpperCamelCase_: Union[str, Any] = self.get_dummy_components()
UpperCamelCase_: int = self.pipeline_class(**snake_case_ )
UpperCamelCase_: Optional[Any] = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: List[Any] = torch.Generator(device=snake_case_ ).manual_seed(0 )
UpperCamelCase_: int = pipe.decoder.dtype
UpperCamelCase_: Optional[Any] = 1
UpperCamelCase_: List[str] = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
UpperCamelCase_: Any = pipe.prepare_latents(
snake_case_ , dtype=snake_case_ , device=snake_case_ , generator=snake_case_ , latents=snake_case_ , scheduler=DummyScheduler() )
UpperCamelCase_: Tuple = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
UpperCamelCase_: int = pipe.prepare_latents(
snake_case_ , dtype=snake_case_ , device=snake_case_ , generator=snake_case_ , latents=snake_case_ , scheduler=DummyScheduler() )
UpperCamelCase_: List[Any] = self.get_dummy_inputs(snake_case_ , pil_image=snake_case_ )
UpperCamelCase_: Optional[int] = pipe(
**snake_case_ , decoder_latents=snake_case_ , super_res_latents=snake_case_ ).images
UpperCamelCase_: Optional[Any] = self.get_dummy_inputs(snake_case_ , pil_image=snake_case_ )
# Don't pass image, instead pass embedding
UpperCamelCase_: List[str] = pipeline_inputs.pop("""image""" )
UpperCamelCase_: Optional[int] = pipe.image_encoder(snake_case_ ).image_embeds
UpperCamelCase_: Union[str, Any] = pipe(
**snake_case_ , decoder_latents=snake_case_ , super_res_latents=snake_case_ , image_embeddings=snake_case_ , ).images
        # make sure passing image embeddings manually gives an identical result
        assert np.abs(img_out_a - img_out_b ).max() < 1e-4
@skip_mps
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: Optional[int] = torch_device == """cpu"""
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
UpperCamelCase_: Any = 1e-2
self._test_attention_slicing_forward_pass(
test_max_difference=snake_case_ , expected_max_diff=snake_case_ )
@skip_mps
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: Optional[int] = torch_device == """cpu"""
UpperCamelCase_: int = True
UpperCamelCase_: Dict = [
"""decoder_num_inference_steps""",
"""super_res_num_inference_steps""",
]
self._test_inference_batch_single_identical(
test_max_difference=snake_case_ , relax_max_difference=snake_case_ , additional_params_copy_to_batched_inputs=snake_case_ , )
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: Any = [
"""decoder_num_inference_steps""",
"""super_res_num_inference_steps""",
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
UpperCamelCase_: Optional[int] = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=snake_case_ , additional_params_copy_to_batched_inputs=snake_case_ , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=snake_case_ )
@skip_mps
def lowerCAmelCase__ ( self : List[str] ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def lowerCAmelCase__ ( self : List[Any] ):
return super().test_save_load_local()
@skip_mps
def lowerCAmelCase__ ( self : Tuple ):
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Optional[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self : List[Any] ):
UpperCamelCase_: Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png""" )
UpperCamelCase_: int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/unclip/karlo_v1_alpha_cat_variation_fp16.npy""" )
UpperCamelCase_: Optional[Any] = UnCLIPImageVariationPipeline.from_pretrained(
"""kakaobrain/karlo-v1-alpha-image-variations""" , torch_dtype=torch.floataa )
UpperCamelCase_: Dict = pipeline.to(snake_case_ )
pipeline.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: Any = torch.Generator(device="""cpu""" ).manual_seed(0 )
UpperCamelCase_: Union[str, Any] = pipeline(
snake_case_ , generator=snake_case_ , output_type="""np""" , )
UpperCamelCase_: str = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(snake_case_ , snake_case_ , 15 )
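# Hedged standalone illustration (not part of the test classes above) of the
# slice-assertion pattern used throughout this file: compare the bottom-right
# 3x3 corner of one channel against a pinned reference within an absolute
# tolerance. The zero values here are placeholders, not real model outputs.
def _slice_check_demo() -> None:
    image = np.zeros((1, 64, 64, 3))
    image_slice = image[0, -3:, -3:, -1]
    expected_slice = np.zeros(9)
    assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2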
| 670 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ : Optional[Any] = {
"""configuration_distilbert""": [
"""DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""DistilBertConfig""",
"""DistilBertOnnxConfig""",
],
"""tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[Any] = ["""DistilBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[int] = [
"""DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DistilBertForMaskedLM""",
"""DistilBertForMultipleChoice""",
"""DistilBertForQuestionAnswering""",
"""DistilBertForSequenceClassification""",
"""DistilBertForTokenClassification""",
"""DistilBertModel""",
"""DistilBertPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[str] = [
"""TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDistilBertForMaskedLM""",
"""TFDistilBertForMultipleChoice""",
"""TFDistilBertForQuestionAnswering""",
"""TFDistilBertForSequenceClassification""",
"""TFDistilBertForTokenClassification""",
"""TFDistilBertMainLayer""",
"""TFDistilBertModel""",
"""TFDistilBertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Dict = [
"""FlaxDistilBertForMaskedLM""",
"""FlaxDistilBertForMultipleChoice""",
"""FlaxDistilBertForQuestionAnswering""",
"""FlaxDistilBertForSequenceClassification""",
"""FlaxDistilBertForTokenClassification""",
"""FlaxDistilBertModel""",
"""FlaxDistilBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
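# Hedged usage sketch: with the lazy module above, importing a lightweight
# symbol such as the config materializes only `configuration_distilbert`;
# the torch/TF/flax submodules stay untouched until one of their classes is
# accessed. The import path assumes the standard transformers layout.
def _lazy_import_demo():
    from transformers.models.distilbert import DistilBertConfig

    return DistilBertConfig()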
| 670 | 1 |
import requests
def A__ ( lowerCamelCase , lowerCamelCase ) -> None:
UpperCamelCase_: Union[str, Any] = {"""Content-Type""": """application/json"""}
    UpperCamelCase_: List[Any] = requests.post(slack_url , json={"""text""": message_body} , headers=headers )
if response.status_code != 2_00:
UpperCamelCase_: Optional[Any] = (
"""Request to slack returned an error """
F'''{response.status_code}, the response is:\n{response.text}'''
)
raise ValueError(lowerCamelCase )
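# Hedged variant (not part of the original helper): a network timeout avoids
# hanging indefinitely when Slack is unreachable; the 10-second value and the
# raise_for_status() shortcut are illustrative choices, not upstream behavior.
def send_slack_message_with_timeout(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers, timeout=10)
    response.raise_for_status()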
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""")
| 670 |
from manim import *
class _UpperCamelCase ( _A ):
'''simple docstring'''
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: Dict = Rectangle(height=0.5 , width=0.5 )
UpperCamelCase_: Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCamelCase_: Tuple = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Tuple = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Union[str, Any] = VGroup(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Optional[Any] = Text("""CPU""" , font_size=24 )
UpperCamelCase_: int = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(snake_case_ )
UpperCamelCase_: Optional[int] = [mem.copy() for i in range(1 )]
UpperCamelCase_: Dict = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Optional[int] = Text("""GPU""" , font_size=24 )
UpperCamelCase_: Optional[int] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
gpu.align_to(snake_case_ , snake_case_ )
gpu.set_x(gpu.get_x() - 1 )
self.add(snake_case_ )
UpperCamelCase_: Dict = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Any = Text("""Model""" , font_size=24 )
UpperCamelCase_: Optional[Any] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
model.move_to([3, -1.0, 0] )
self.play(
Create(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) , )
UpperCamelCase_: List[Any] = MarkupText(
f'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' , font_size=24 , )
UpperCamelCase_: Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCamelCase_: Union[str, Any] = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case_ , run_time=2.5 ) , Write(snake_case_ ) , Write(snake_case_ ) )
self.add(snake_case_ )
UpperCamelCase_: Union[str, Any] = []
UpperCamelCase_: Union[str, Any] = []
UpperCamelCase_: Tuple = []
for i, rect in enumerate(snake_case_ ):
UpperCamelCase_: Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(snake_case_ , opacity=0.7 )
cpu_target.move_to(snake_case_ )
cpu_target.generate_target()
UpperCamelCase_: int = 0.46 / 4
UpperCamelCase_: Optional[int] = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=snake_case_ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=snake_case_ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=snake_case_ , buff=0.0 )
cpu_targs.append(snake_case_ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(snake_case_ ) )
second_animations.append(MoveToTarget(snake_case_ , run_time=1.5 ) )
self.play(*snake_case_ )
self.play(*snake_case_ )
self.wait()
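# Hedged usage note: a Scene subclass like the one above is typically rendered
# from the command line (the file name below is a placeholder; the class name
# mirrors the obfuscated one used in this snippet):
#   manim -pql this_file.py _UpperCamelCase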
| 670 | 1 |
def A__ ( lowerCamelCase ) -> int:
if not grid or not grid[0]:
raise TypeError("""The grid does not contain the appropriate information""" )
for cell_n in range(1 , len(grid[0] ) ):
grid[0][cell_n] += grid[0][cell_n - 1]
UpperCamelCase_: Optional[Any] = grid[0]
for row_n in range(1 , len(lowerCamelCase ) ):
UpperCamelCase_: Dict = grid[row_n]
        UpperCamelCase_: int = fill_row(current_row , row_above )
UpperCamelCase_: Any = grid[row_n]
return grid[-1][-1]
def A__ ( lowerCamelCase , lowerCamelCase ) -> list:
current_row[0] += row_above[0]
for cell_n in range(1 , len(lowerCamelCase ) ):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
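# Hedged standalone sketch of the recurrence above, reimplemented with local
# names because the two obfuscated definitions in this snippet shadow each
# other; the grid is the classic 3x3 example whose cheapest path costs 7.
def _min_path_sum_demo(grid: list) -> int:
    acc = list(grid[0])
    for cell_n in range(1, len(acc)):
        acc[cell_n] += acc[cell_n - 1]  # prefix sums along the first row
    for row in grid[1:]:
        cur = list(row)
        cur[0] += acc[0]
        for cell_n in range(1, len(cur)):
            cur[cell_n] += min(cur[cell_n - 1], acc[cell_n])
        acc = cur
    return acc[-1]

assert _min_path_sum_demo([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7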
| 670 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Union[str, Any] = """laion/clap-htsat-unfused"""
UpperCamelCase_: List[str] = tempfile.mkdtemp()
def lowerCAmelCase__ ( self : Tuple , **snake_case_ : Optional[Any] ):
return RobertaTokenizer.from_pretrained(self.checkpoint , **snake_case_ )
def lowerCAmelCase__ ( self : str , **snake_case_ : Any ):
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **snake_case_ )
def lowerCAmelCase__ ( self : Tuple ):
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: Union[str, Any] = self.get_tokenizer()
UpperCamelCase_: int = self.get_feature_extractor()
UpperCamelCase_: List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_: Any = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: Any = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_: Optional[int] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
UpperCamelCase_: Dict = self.get_feature_extractor(do_normalize=snake_case_ , padding_value=1.0 )
UpperCamelCase_: List[str] = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=snake_case_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: int = self.get_feature_extractor()
UpperCamelCase_: Optional[Any] = self.get_tokenizer()
UpperCamelCase_: Dict = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
UpperCamelCase_: Optional[Any] = floats_list((3, 1000) )
UpperCamelCase_: List[str] = feature_extractor(snake_case_ , return_tensors="""np""" )
UpperCamelCase_: int = processor(audios=snake_case_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: List[Any] = self.get_feature_extractor()
UpperCamelCase_: List[str] = self.get_tokenizer()
UpperCamelCase_: List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
UpperCamelCase_: Dict = """This is a test string"""
UpperCamelCase_: Tuple = processor(text=snake_case_ )
UpperCamelCase_: Optional[int] = tokenizer(snake_case_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: List[str] = self.get_feature_extractor()
UpperCamelCase_: Any = self.get_tokenizer()
UpperCamelCase_: Optional[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
UpperCamelCase_: str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase_: Tuple = processor.batch_decode(snake_case_ )
UpperCamelCase_: str = tokenizer.batch_decode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Any = self.get_feature_extractor()
UpperCamelCase_: str = self.get_tokenizer()
UpperCamelCase_: Optional[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
| 670 | 1 |
from graphs.minimum_spanning_tree_kruskal import kruskal
def A__ ( ) -> Dict:
UpperCamelCase_: Optional[Any] = 9
UpperCamelCase_: Dict = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    UpperCamelCase_: Union[str, Any] = kruskal(num_nodes , edges )
UpperCamelCase_: Optional[Any] = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
    assert sorted(result ) == sorted(expected )
| 670 |
import warnings
from ..trainer import Trainer
from ..utils import logging
lowerCamelCase_ : Dict = logging.get_logger(__name__)
class _UpperCamelCase ( _A ):
'''simple docstring'''
def __init__( self : List[str] , snake_case_ : Tuple=None , **snake_case_ : List[str] ):
warnings.warn(
"""`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
"""instead.""" , snake_case_ , )
super().__init__(args=snake_case_ , **snake_case_ )
| 670 | 1 |
from typing import Any
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : str , snake_case_ : Any ):
UpperCamelCase_: Optional[Any] = data
UpperCamelCase_: List[Any] = None
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Any ):
UpperCamelCase_: Dict = None
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: Any = self.head
while temp is not None:
print(temp.data , end=""" """ )
UpperCamelCase_: Any = temp.next
print()
def lowerCAmelCase__ ( self : List[str] , snake_case_ : Any ):
UpperCamelCase_: List[Any] = Node(snake_case_ )
UpperCamelCase_: str = self.head
UpperCamelCase_: Union[str, Any] = new_node
    def lowerCAmelCase__ ( self : Union[str, Any] , snake_case_ : List[Any] , snake_case_ : Union[str, Any] ):
        if node_data_a == node_data_b:
            return
        else:
            UpperCamelCase_: List[str] = self.head
            while node_a is not None and node_a.data != node_data_a:
                UpperCamelCase_: str = node_a.next
            UpperCamelCase_: int = self.head
            while node_b is not None and node_b.data != node_data_b:
                UpperCamelCase_: List[Any] = node_b.next
            if node_a is None or node_b is None:
                return
            UpperCamelCase_, UpperCamelCase_: Tuple = node_b.data, node_a.data
if __name__ == "__main__":
lowerCamelCase_ : int = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print("""After swapping""")
ll.print_list()
| 670 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
lowerCamelCase_ : Optional[int] = logging.get_logger("""transformers.models.speecht5""")
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[Any]:
hf_model.apply_weight_norm()
UpperCamelCase_: Union[str, Any] = checkpoint["""input_conv.weight_g"""]
UpperCamelCase_: Optional[int] = checkpoint["""input_conv.weight_v"""]
UpperCamelCase_: List[Any] = checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
UpperCamelCase_: List[str] = checkpoint[F'''upsamples.{i}.1.weight_g''']
UpperCamelCase_: Dict = checkpoint[F'''upsamples.{i}.1.weight_v''']
UpperCamelCase_: List[str] = checkpoint[F'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
UpperCamelCase_: Tuple = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g''']
UpperCamelCase_: Any = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v''']
UpperCamelCase_: Tuple = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias''']
UpperCamelCase_: Union[str, Any] = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g''']
UpperCamelCase_: Any = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v''']
UpperCamelCase_: int = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias''']
UpperCamelCase_: int = checkpoint["""output_conv.1.weight_g"""]
UpperCamelCase_: Tuple = checkpoint["""output_conv.1.weight_v"""]
UpperCamelCase_: List[str] = checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , ) -> Optional[int]:
if config_path is not None:
UpperCamelCase_: Union[str, Any] = SpeechTaHifiGanConfig.from_pretrained(lowerCamelCase )
else:
UpperCamelCase_: str = SpeechTaHifiGanConfig()
UpperCamelCase_: Union[str, Any] = SpeechTaHifiGan(lowerCamelCase )
UpperCamelCase_: str = torch.load(lowerCamelCase )
load_weights(orig_checkpoint["""model"""]["""generator"""] , lowerCamelCase , lowerCamelCase )
UpperCamelCase_: Union[str, Any] = np.load(lowerCamelCase )
UpperCamelCase_: int = stats[0].reshape(-1 )
UpperCamelCase_: Union[str, Any] = stats[1].reshape(-1 )
UpperCamelCase_: Dict = torch.from_numpy(lowerCamelCase ).float()
UpperCamelCase_: Optional[Any] = torch.from_numpy(lowerCamelCase ).float()
model.save_pretrained(lowerCamelCase )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(lowerCamelCase )
if __name__ == "__main__":
lowerCamelCase_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
lowerCamelCase_ : Optional[int] = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
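# Example invocation (hedged: all file names below are placeholders):
#   python convert_hifigan_checkpoint.py \
#       --checkpoint_path generator.ckpt \
#       --stats_path stats.npy \
#       --config_path config.json \
#       --pytorch_dump_folder_path ./speecht5_hifigan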
| 670 | 1 |
import numpy as np
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase = 1E-1_2 , lowerCamelCase = 1_00 , ) -> tuple[float, np.ndarray]:
assert np.shape(lowerCamelCase )[0] == np.shape(lowerCamelCase )[1]
# Ensure proper dimensionality.
    assert np.shape(input_matrix )[0] == np.shape(vector )[0]
# Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix ) == np.iscomplexobj(vector )
UpperCamelCase_: Dict = np.iscomplexobj(lowerCamelCase )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(lowerCamelCase , input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
UpperCamelCase_: Dict = False
UpperCamelCase_: Tuple = 0
UpperCamelCase_: str = 0
UpperCamelCase_: List[str] = 1E1_2
while not convergence:
# Multiple matrix by the vector.
UpperCamelCase_: Dict = np.dot(lowerCamelCase , lowerCamelCase )
# Normalize the resulting output vector.
UpperCamelCase_: Union[str, Any] = w / np.linalg.norm(lowerCamelCase )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
UpperCamelCase_: Any = vector.conj().T if is_complex else vector.T
UpperCamelCase_: int = np.dot(lowerCamelCase , np.dot(lowerCamelCase , lowerCamelCase ) )
# Check convergence.
UpperCamelCase_: int = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
UpperCamelCase_: int = True
UpperCamelCase_: str = lambda_
if is_complex:
UpperCamelCase_: List[Any] = np.real(lambda_ )
return lambda_, vector
def A__ ( ) -> None:
UpperCamelCase_: List[str] = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
UpperCamelCase_: Tuple = np.array([41, 4, 20] )
UpperCamelCase_: Tuple = real_input_matrix.astype(np.complexaaa )
UpperCamelCase_: List[Any] = np.triu(1j * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
UpperCamelCase_: List[Any] = np.array([41, 4, 20] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
UpperCamelCase_: List[Any] = real_input_matrix
UpperCamelCase_: int = real_vector
elif problem_type == "complex":
UpperCamelCase_: int = complex_input_matrix
UpperCamelCase_: Optional[Any] = complex_vector
# Our implementation.
UpperCamelCase_, UpperCamelCase_: int = power_iteration(lowerCamelCase , lowerCamelCase )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh used for symmetric or hermetian matrices).
UpperCamelCase_, UpperCamelCase_: List[str] = np.linalg.eigh(lowerCamelCase )
# Last eigenvalue is the maximum one.
UpperCamelCase_: Tuple = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
UpperCamelCase_: Optional[Any] = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1E-6
# Take absolute values element wise of each eigenvector.
# as they are only unique to a minus sign.
assert np.linalg.norm(np.abs(lowerCamelCase ) - np.abs(lowerCamelCase ) ) <= 1E-6
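# Hedged standalone sanity check (kept separate from the placeholder names
# above): power iteration on diag(2, 1) must recover the dominant eigenvalue 2.
def _power_iteration_sanity_check() -> None:
    matrix = np.array([[2.0, 0.0], [0.0, 1.0]])
    vector = np.array([1.0, 1.0])
    for _ in range(50):
        vector = np.dot(matrix, vector)
        vector = vector / np.linalg.norm(vector)
    eigenvalue = np.dot(vector, np.dot(matrix, vector))
    assert abs(eigenvalue - 2.0) < 1e-6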
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 670 |
lowerCamelCase_ : Optional[Any] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
lowerCamelCase_ : Union[str, Any] = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCamelCase_ : Optional[Any] = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 670 | 1 |
def A__ ( lowerCamelCase , lowerCamelCase ) -> int:
    return int((input_a, input_b).count(1 ) != 0 )
def A__ ( ) -> None:
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 670 |
import cva
import numpy as np
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Dict , snake_case_ : float , snake_case_ : int ):
if k in (0.04, 0.06):
UpperCamelCase_: Union[str, Any] = k
UpperCamelCase_: Union[str, Any] = window_size
else:
raise ValueError("""invalid k value""" )
def __str__( self : int ):
return str(self.k )
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : str ):
UpperCamelCase_: int = cva.imread(snake_case_ , 0 )
UpperCamelCase_, UpperCamelCase_: List[Any] = img.shape
UpperCamelCase_: list[list[int]] = []
UpperCamelCase_: int = img.copy()
UpperCamelCase_: Any = cva.cvtColor(snake_case_ , cva.COLOR_GRAY2RGB )
UpperCamelCase_, UpperCamelCase_: List[Any] = np.gradient(snake_case_ )
UpperCamelCase_: Optional[Any] = dx**2
UpperCamelCase_: Dict = dy**2
UpperCamelCase_: Optional[Any] = dx * dy
        UpperCamelCase_: str = self.k  # use the k validated in the constructor instead of re-hardcoding 0.04
UpperCamelCase_: int = self.window_size // 2
for y in range(snake_case_ , h - offset ):
for x in range(snake_case_ , w - offset ):
UpperCamelCase_: List[Any] = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCamelCase_: int = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCamelCase_: List[str] = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCamelCase_: List[str] = (wxx * wyy) - (wxy**2)
UpperCamelCase_: Optional[int] = wxx + wyy
UpperCamelCase_: Dict = det - k * (trace**2)
                # Threshold on the corner response r; 0.5 is a tunable value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
if __name__ == "__main__":
lowerCamelCase_ : Optional[Any] = HarrisCorner(0.04, 3)
lowerCamelCase_ , lowerCamelCase_ : Any = edge_detect.detect("""path_to_image""")
cva.imwrite("""detect.png""", color_img)
| 670 | 1 |
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowerCamelCase_ : str = 3
def A__ ( lowerCamelCase ) -> int:
print("""Generating primitive root of p""" )
while True:
UpperCamelCase_: Any = random.randrange(3 , lowerCamelCase )
if pow(lowerCamelCase , 2 , lowerCamelCase ) == 1:
continue
if pow(lowerCamelCase , lowerCamelCase , lowerCamelCase ) == 1:
continue
return g
def A__ ( lowerCamelCase ) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
print("""Generating prime p...""" )
UpperCamelCase_: Any = rabin_miller.generate_large_prime(lowerCamelCase ) # select large prime number.
UpperCamelCase_: List[Any] = primitive_root(lowerCamelCase ) # one primitive root on modulo p.
UpperCamelCase_: int = random.randrange(3 , lowerCamelCase ) # private_key -> have to be greater than 2 for safety.
UpperCamelCase_: int = cryptomath.find_mod_inverse(pow(lowerCamelCase , lowerCamelCase , lowerCamelCase ) , lowerCamelCase )
    UpperCamelCase_: Optional[Any] = (key_size, e_a, e_b, p)
UpperCamelCase_: Optional[int] = (key_size, d)
return public_key, private_key
def A__ ( lowerCamelCase , lowerCamelCase ) -> None:
if os.path.exists(F'''{name}_pubkey.txt''' ) or os.path.exists(F'''{name}_privkey.txt''' ):
print("""\nWARNING:""" )
print(
F'''"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'''
"""Use a different name or delete these files and re-run this program.""" )
sys.exit()
UpperCamelCase_, UpperCamelCase_: str = generate_key(lowerCamelCase )
print(F'''\nWriting public key to file {name}_pubkey.txt...''' )
with open(F'''{name}_pubkey.txt''' , """w""" ) as fo:
fo.write(F'''{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}''' )
print(F'''Writing private key to file {name}_privkey.txt...''' )
with open(F'''{name}_privkey.txt''' , """w""" ) as fo:
fo.write(F'''{private_key[0]},{private_key[1]}''' )
def A__ ( ) -> None:
print("""Making key files...""" )
make_key_files("""elgamal""" , 20_48 )
print("""Key files generation successful""" )
if __name__ == "__main__":
main()
| 670 |
import random
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase = False ) -> dict:
UpperCamelCase_: dict = {i: [] for i in range(lowerCamelCase )}
    # if the probability is greater than or equal to 1, generate a complete graph
if probability >= 1:
return complete_graph(lowerCamelCase )
    # if the probability is less than or equal to 0, return a graph without edges
if probability <= 0:
return graph
    # for each pair of nodes, add an edge from i to j when the randomly
    # generated number is less than the given probability
for i in range(lowerCamelCase ):
for j in range(i + 1 , lowerCamelCase ):
if random.random() < probability:
graph[i].append(lowerCamelCase )
if not directed:
                # if the graph is undirected, also add the reverse edge from j to i
graph[j].append(lowerCamelCase )
return graph
def A__ ( lowerCamelCase ) -> dict:
return {
i: [j for j in range(lowerCamelCase ) if i != j] for i in range(lowerCamelCase )
}
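# Hedged standalone check mirroring the comprehension above: the complete
# graph on three nodes connects every node to the other two.
def _complete_graph_demo(n: int) -> dict:
    return {i: [j for j in range(n) if i != j] for i in range(n)}

assert _complete_graph_demo(3) == {0: [1, 2], 1: [0, 2], 2: [0, 1]}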
if __name__ == "__main__":
import doctest
doctest.testmod()
| 670 | 1 |
def A__ ( lowerCamelCase = 50 ) -> int:
UpperCamelCase_: Union[str, Any] = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
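# Hedged standalone check of the recurrence above (reimplemented so it can be
# called despite the obfuscated name): Project Euler problem 114 states that a
# row of length 7 admits exactly 17 fillings.
def _ways_demo(length: int) -> int:
    ways = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways[row_length] += ways[row_length - block_start - block_length - 1]
            ways[row_length] += 1
    return ways[length]

assert _ways_demo(7) == 17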
if __name__ == "__main__":
print(F"""{solution() = }""")
| 670 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: Optional[int] = logging.get_logger()
# the current default level is logging.WARNING
UpperCamelCase_: Dict = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(snake_case_ )
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: Union[str, Any] = logging.get_verbosity()
UpperCamelCase_: int = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
UpperCamelCase_: Union[str, Any] = """Testing 1, 2, 3"""
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(snake_case_ ) as cl:
logger.warning(snake_case_ )
self.assertEqual(cl.out , msg + """\n""" )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(snake_case_ ) as cl:
logger.warning(snake_case_ )
self.assertEqual(cl.out , """""" )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(snake_case_ ) as cl:
logger.warning(snake_case_ )
self.assertEqual(cl.out , msg + """\n""" )
# restore to the original level
logging.set_verbosity(snake_case_ )
@mockenv(TRANSFORMERS_VERBOSITY="""error""" )
def lowerCAmelCase__ ( self : Optional[int] ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
UpperCamelCase_: List[str] = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
UpperCamelCase_: str = os.getenv("""TRANSFORMERS_VERBOSITY""" , snake_case_ )
UpperCamelCase_: Any = logging.log_levels[env_level_str]
UpperCamelCase_: Dict = logging.get_verbosity()
self.assertEqual(
snake_case_ , snake_case_ , f'''TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}''' , )
# restore to the original level
UpperCamelCase_: str = """"""
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="""super-error""" )
def lowerCAmelCase__ ( self : List[Any] ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
UpperCamelCase_: str = logging.logging.getLogger()
with CaptureLogger(snake_case_ ) as cl:
# this action activates the env var
logging.get_logger("""transformers.models.bart.tokenization_bart""" )
self.assertIn("""Unknown option TRANSFORMERS_VERBOSITY=super-error""" , cl.out )
# no need to restore as nothing was changed
def lowerCAmelCase__ ( self : List[Any] ):
# testing `logger.warning_advice()`
transformers.utils.logging._reset_library_root_logger()
UpperCamelCase_: List[str] = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
UpperCamelCase_: Any = """Testing 1, 2, 3"""
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""1""" ):
# nothing should be logged as env var disables this method
with CaptureLogger(snake_case_ ) as cl:
logger.warning_advice(snake_case_ )
self.assertEqual(cl.out , """""" )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""""" ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(snake_case_ ) as cl:
logger.warning_advice(snake_case_ )
self.assertEqual(cl.out , msg + """\n""" )
def A__ ( ) -> Union[str, Any]:
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
| 670 | 1 |
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
lowerCamelCase_ : Optional[int] = get_tests_dir("""fixtures/dummy-config.json""")
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Tuple = 0
def lowerCAmelCase__ ( self : Optional[int] ):
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) )
def lowerCAmelCase__ ( self : List[Any] ):
UpperCamelCase_: int = AutoConfig.from_pretrained("""bert-base-uncased""" )
self.assertIsInstance(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: int = AutoConfig.from_pretrained(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: Union[str, Any] = AutoConfig.from_pretrained(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: Tuple = AutoConfig.for_model("""roberta""" )
self.assertIsInstance(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : Any ):
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
UpperCamelCase_: Any = os.path.join(snake_case_ , """fake-roberta""" )
os.makedirs(snake_case_ , exist_ok=snake_case_ )
with open(os.path.join(snake_case_ , """config.json""" ) , """w""" ) as f:
f.write(json.dumps({} ) )
UpperCamelCase_: Tuple = AutoConfig.from_pretrained(snake_case_ )
self.assertEqual(type(snake_case_ ) , snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] ):
try:
AutoConfig.register("""custom""" , snake_case_ )
# Wrong model type will raise an error
with self.assertRaises(snake_case_ ):
AutoConfig.register("""model""" , snake_case_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(snake_case_ ):
AutoConfig.register("""bert""" , snake_case_ )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCamelCase_: Any = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(snake_case_ )
UpperCamelCase_: List[Any] = AutoConfig.from_pretrained(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def lowerCAmelCase__ ( self : List[Any] ):
with self.assertRaisesRegex(
snake_case_ , """bert-base is not a local folder and is not a valid model identifier""" ):
UpperCamelCase_: Any = AutoConfig.from_pretrained("""bert-base""" )
def lowerCAmelCase__ ( self : Optional[int] ):
with self.assertRaisesRegex(
snake_case_ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
UpperCamelCase_: str = AutoConfig.from_pretrained(snake_case_ , revision="""aaaaaa""" )
def lowerCAmelCase__ ( self : Tuple ):
with self.assertRaisesRegex(
snake_case_ , """hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" , ):
UpperCamelCase_: Any = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" )
def lowerCAmelCase__ ( self : Optional[Any] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(snake_case_ ):
UpperCamelCase_: Optional[int] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(snake_case_ ):
UpperCamelCase_: Union[str, Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=snake_case_ )
UpperCamelCase_: str = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=snake_case_ )
self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(snake_case_ )
UpperCamelCase_: Optional[Any] = AutoConfig.from_pretrained(snake_case_ , trust_remote_code=snake_case_ )
self.assertEqual(reloaded_config.__class__.__name__ , """NewModelConfig""" )
def lowerCAmelCase__ ( self : str ):
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : Tuple = """new-model"""
try:
AutoConfig.register("""new-model""" , snake_case_ )
# If remote code is not set, the default is to use local
UpperCamelCase_: Tuple = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
# If remote code is disabled, we load the local one.
UpperCamelCase_: Optional[int] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=snake_case_ )
self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
# If remote is enabled, we load from the Hub
UpperCamelCase_: List[Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=snake_case_ )
self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 670 |
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
lowerCamelCase_ : Optional[int] = HUGGINGFACE_HUB_CACHE
lowerCamelCase_ : List[str] = """config.json"""
lowerCamelCase_ : Any = """diffusion_pytorch_model.bin"""
lowerCamelCase_ : Union[str, Any] = """diffusion_flax_model.msgpack"""
lowerCamelCase_ : Dict = """model.onnx"""
lowerCamelCase_ : List[Any] = """diffusion_pytorch_model.safetensors"""
lowerCamelCase_ : Optional[Any] = """weights.pb"""
lowerCamelCase_ : Optional[Any] = """https://huggingface.co"""
lowerCamelCase_ : Union[str, Any] = default_cache_path
lowerCamelCase_ : Tuple = """diffusers_modules"""
lowerCamelCase_ : Optional[Any] = os.getenv("""HF_MODULES_CACHE""", os.path.join(hf_cache_home, """modules"""))
lowerCamelCase_ : str = ["""fp16""", """non-ema"""]
lowerCamelCase_ : List[Any] = """.self_attn"""
| 670 | 1 |
from __future__ import annotations
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> dict[str, float]:
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if resistance < 0:
raise ValueError("""Resistance cannot be negative""" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
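# Quick sanity checks (hypothetical values) exercising each branch above: pass 0 for
# exactly the quantity to be solved.
assert A__(voltage=0, current=2, resistance=3) == {"voltage": 6.0}
assert A__(voltage=10, current=0, resistance=5) == {"current": 2.0}
assert A__(voltage=10, current=2, resistance=0) == {"resistance": 5.0}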
| 670 |
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: List[Any] = inspect.getfile(accelerate.test_utils )
UpperCamelCase_: List[str] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_script.py"""] )
UpperCamelCase_: str = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: Any = f'''
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
'''.split()
UpperCamelCase_: Dict = [sys.executable] + distributed_args
execute_subprocess_async(snake_case_ , env=os.environ.copy() )
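# For reference, the subprocess assembled above amounts to running:
#   python <test_dir>/xla_spawn.py --num_cores 8 <accelerate.test_utils dir>/scripts/test_script.py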
| 670 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ : Any = {"""configuration_plbart""": ["""PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PLBartConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[int] = ["""PLBartTokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Dict = [
"""PLBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PLBartForCausalLM""",
"""PLBartForConditionalGeneration""",
"""PLBartForSequenceClassification""",
"""PLBartModel""",
"""PLBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
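# A short usage sketch (not part of this module): with _LazyModule, importing the
# package is cheap and each backend is imported only on first attribute access.
#   from transformers.models import plbart  # nothing heavy imported yet
#   cfg = plbart.PLBartConfig()  # resolves configuration_plbart on first use
#   model = plbart.PLBartModel(cfg)  # resolves modeling_plbart (and torch) here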
| 670 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class _UpperCamelCase ( _A , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Optional[int] = BarthezTokenizer
__UpperCamelCase : str = BarthezTokenizerFast
__UpperCamelCase : str = True
__UpperCamelCase : List[Any] = True
def lowerCAmelCase__ ( self : Optional[int] ):
super().setUp()
UpperCamelCase_: Tuple = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=snake_case_ )
UpperCamelCase_: Dict = tokenizer
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: str = """<pad>"""
UpperCamelCase_: int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(snake_case_ ) , 10_1122 )
def lowerCAmelCase__ ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_1122 )
@require_torch
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: Dict = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
UpperCamelCase_: Union[str, Any] = [0, 57, 3018, 7_0307, 91, 2]
UpperCamelCase_: Union[str, Any] = self.tokenizer(
snake_case_ , max_length=len(snake_case_ ) , padding=snake_case_ , truncation=snake_case_ , return_tensors="""pt""" )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
UpperCamelCase_: Any = batch.input_ids.tolist()[0]
self.assertListEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : Any ):
if not self.test_rust_tokenizer:
return
UpperCamelCase_: Optional[Any] = self.get_tokenizer()
UpperCamelCase_: Union[str, Any] = self.get_rust_tokenizer()
UpperCamelCase_: str = """I was born in 92000, and this is falsé."""
UpperCamelCase_: str = tokenizer.tokenize(snake_case_ )
UpperCamelCase_: int = rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase_: int = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
UpperCamelCase_: int = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase_: List[str] = self.get_rust_tokenizer()
UpperCamelCase_: Tuple = tokenizer.encode(snake_case_ )
UpperCamelCase_: Tuple = rust_tokenizer.encode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
@slow
def lowerCAmelCase__ ( self : int ):
# fmt: off
UpperCamelCase_: Optional[Any] = {"""input_ids""": [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a French model, so we also use French texts.
UpperCamelCase_: str = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=snake_case_ , )
| 670 | 1 |
def search( list_data: list , key: int , left: int = 0 , right: int = 0 ) -> int:
# Recursive linear search that checks both ends of the remaining window;
# `right` defaults to the last index on the initial call.
right = right or len(list_data ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(list_data , key , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
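# Minimal demonstration of the two-ended search above (hypothetical data).
data = [1, 3, 5, 7, 9]
assert search(data, 7) == 3
assert search(data, 4) == -1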
| 670 |
def add( first: int , second: int ) -> int:
# Add two integers using only bitwise operations.
while second != 0:
c = first & second  # carry bits: positions where both operands have a 1
first ^= second  # partial sum without the carry
second = c << 1  # shift the carry into place for the next round
return first
if __name__ == "__main__":
import doctest
doctest.testmod()
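# Sanity check: 5 + 9 takes two carry-propagation iterations (carry 1, then 0).
assert add(5, 9) == 14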
first = int(input("""Enter the first number: """).strip())
second = int(input("""Enter the second number: """).strip())
print(F"""{add(first, second) = }""")
| 670 | 1 |
from __future__ import annotations
from decimal import Decimal
from numpy import array
def A__ ( matrix: list[list[float]] ) -> list[list[float]]:
d = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(matrix ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
determinant = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError("""This matrix has no inverse.""" )
# Creates a copy of the matrix with swapped positions of the elements
swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(n ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(matrix ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
determinant = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError("""This matrix has no inverse.""" )
# Creating cofactor matrix
cofactor_matrix = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
cofactor_matrix[0][0] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
cofactor_matrix[0][1] = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
cofactor_matrix[0][2] = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
cofactor_matrix[1][0] = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
cofactor_matrix[1][1] = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
cofactor_matrix[1][2] = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
cofactor_matrix[2][0] = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
cofactor_matrix[2][1] = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
cofactor_matrix[2][2] = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
adjoint_matrix = array(cofactor_matrix )
for i in range(3 ):
for j in range(3 ):
adjoint_matrix[i][j] = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
inverse_matrix = array(adjoint_matrix )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(determinant )
# Calculate the inverse of the matrix
return [[float(d(n ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError("""Please provide a matrix of size 2x2 or 3x3.""" )
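# Comment-only example (hypothetical values): inverting a 2x2 matrix with the function above.
#   A__([[2.0, 5.0], [2.0, 0.0]])  # -> [[0.0, 0.5], [0.2, -0.2]]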
| 670 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
lowerCamelCase_ : List[str] = logging.getLogger(__name__)
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
__UpperCamelCase : bool = field(
default=_A , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
__UpperCamelCase : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
__UpperCamelCase : bool = field(
default=_A , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : Optional[str] = field(default=_A , metadata={"""help""": """The input training data file (a text file)."""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
__UpperCamelCase : bool = field(
default=_A , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. If passed, sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__UpperCamelCase : bool = field(
default=_A , metadata={
"""help""": (
"""Whether to pad all samples to the maximum sentence length. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch. More """
"""efficient on GPU but very bad for TPU."""
)
} , )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def lowerCAmelCase__ ( self : Dict ):
if self.train_file is not None:
UpperCamelCase_: Union[str, Any] = self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
UpperCamelCase_: Dict = self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : PreTrainedTokenizerBase
__UpperCamelCase : Union[bool, str, PaddingStrategy] = True
__UpperCamelCase : Optional[int] = None
__UpperCamelCase : Optional[int] = None
def __call__( self : Optional[int] , snake_case_ : Dict ):
UpperCamelCase_: Dict = """label""" if """label""" in features[0].keys() else """labels"""
UpperCamelCase_: int = [feature.pop(snake_case_ ) for feature in features]
UpperCamelCase_: Optional[Any] = len(snake_case_ )
UpperCamelCase_: List[str] = len(features[0]["""input_ids"""] )
UpperCamelCase_: Tuple = [
[{k: v[i] for k, v in feature.items()} for i in range(snake_case_ )] for feature in features
]
UpperCamelCase_: Any = list(chain(*snake_case_ ) )
UpperCamelCase_: List[Any] = self.tokenizer.pad(
snake_case_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
# Un-flatten
UpperCamelCase_: Tuple = {k: v.view(snake_case_ , snake_case_ , -1 ) for k, v in batch.items()}
# Add back labels
UpperCamelCase_: Optional[int] = torch.tensor(snake_case_ , dtype=torch.intaa )
return batch
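# Shape sketch for the collator above (hypothetical sizes): with batch_size=2 and
# num_choices=4, the 2*4 flattened per-choice encodings are padded together, then
# reshaped back, so batch["input_ids"] is (2, 4, seq_len) and batch["labels"] is (2,).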
def A__ ( ) -> Tuple:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase_: str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: List[str] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_swag""" , lowerCamelCase , lowerCamelCase )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase_: Dict = training_args.get_process_log_level()
logger.setLevel(lowerCamelCase )
datasets.utils.logging.set_verbosity(lowerCamelCase )
transformers.utils.logging.set_verbosity(lowerCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
UpperCamelCase_: List[str] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase_: List[str] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
UpperCamelCase_: List[str] = {}
if data_args.train_file is not None:
UpperCamelCase_: List[Any] = data_args.train_file
if data_args.validation_file is not None:
UpperCamelCase_: Optional[int] = data_args.validation_file
UpperCamelCase_: Any = data_args.train_file.split(""".""" )[-1]
UpperCamelCase_: Tuple = load_dataset(
lowerCamelCase , data_files=lowerCamelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
UpperCamelCase_: int = load_dataset(
"""swag""" , """regular""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase_: Optional[int] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase_: Union[str, Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase_: List[str] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
UpperCamelCase_: Union[str, Any] = [F'''ending{i}''' for i in range(4 )]
UpperCamelCase_: str = """sent1"""
UpperCamelCase_: List[str] = """sent2"""
if data_args.max_seq_length is None:
UpperCamelCase_: int = tokenizer.model_max_length
if max_seq_length > 10_24:
logger.warning(
"""The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"""
""" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"""
""" override this default with `--block_size xxx`.""" )
UpperCamelCase_: Optional[Any] = 10_24
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
UpperCamelCase_: Union[str, Any] = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(lowerCamelCase ):
UpperCamelCase_: Optional[Any] = [[context] * 4 for context in examples[context_name]]
UpperCamelCase_: Dict = examples[question_header_name]
UpperCamelCase_: List[str] = [
[F'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(lowerCamelCase )
]
# Flatten out
UpperCamelCase_: str = list(chain(*lowerCamelCase ) )
UpperCamelCase_: Any = list(chain(*lowerCamelCase ) )
# Tokenize
UpperCamelCase_: Any = tokenizer(
lowerCamelCase , lowerCamelCase , truncation=lowerCamelCase , max_length=lowerCamelCase , padding="""max_length""" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(lowerCamelCase ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
UpperCamelCase_: str = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
UpperCamelCase_: Union[str, Any] = min(len(lowerCamelCase ) , data_args.max_train_samples )
UpperCamelCase_: Optional[int] = train_dataset.select(range(lowerCamelCase ) )
with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
UpperCamelCase_: str = train_dataset.map(
lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
UpperCamelCase_: Dict = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
UpperCamelCase_: str = min(len(lowerCamelCase ) , data_args.max_eval_samples )
UpperCamelCase_: Tuple = eval_dataset.select(range(lowerCamelCase ) )
with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
UpperCamelCase_: str = eval_dataset.map(
lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
UpperCamelCase_: str = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=lowerCamelCase , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(lowerCamelCase ):
UpperCamelCase_, UpperCamelCase_: List[str] = eval_predictions
UpperCamelCase_: Optional[Any] = np.argmax(lowerCamelCase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
UpperCamelCase_: Union[str, Any] = Trainer(
model=lowerCamelCase , args=lowerCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowerCamelCase , data_collator=lowerCamelCase , compute_metrics=lowerCamelCase , )
# Training
if training_args.do_train:
UpperCamelCase_: List[Any] = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase_: int = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase_: str = last_checkpoint
UpperCamelCase_: Optional[Any] = trainer.train(resume_from_checkpoint=lowerCamelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
UpperCamelCase_: Tuple = train_result.metrics
UpperCamelCase_: Tuple = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase )
)
UpperCamelCase_: Optional[Any] = min(lowerCamelCase , len(lowerCamelCase ) )
trainer.log_metrics("""train""" , lowerCamelCase )
trainer.save_metrics("""train""" , lowerCamelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
UpperCamelCase_: Optional[Any] = trainer.evaluate()
UpperCamelCase_: Tuple = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase )
UpperCamelCase_: Optional[Any] = min(lowerCamelCase , len(lowerCamelCase ) )
trainer.log_metrics("""eval""" , lowerCamelCase )
trainer.save_metrics("""eval""" , lowerCamelCase )
UpperCamelCase_: Optional[int] = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """multiple-choice""",
"""dataset_tags""": """swag""",
"""dataset_args""": """regular""",
"""dataset""": """SWAG""",
"""language""": """en""",
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCamelCase )
else:
trainer.create_model_card(**lowerCamelCase )
def A__ ( lowerCamelCase ) -> Tuple:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 670 | 1 |
def is_palindrome( num: int ) -> bool:
return str(num ) == str(num )[::-1]
def sum_reverse( num: int ) -> int:
return int(num ) + int(str(num )[::-1] )
def solution( limit: int = 1_00_00 ) -> int:
lychrel_nums = []
for num in range(1 , limit ):
iterations = 0
a_number = num
while iterations < 50:
a_number = sum_reverse(a_number )
iterations += 1
if is_palindrome(a_number ):
break
else:
lychrel_nums.append(num )
return len(lychrel_nums )
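# Worked example: 47 is not a Lychrel candidate, since 47 + 74 = 121 is already a
# palindrome after one step; 196 never resolves within the 50-iteration cap above.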
if __name__ == "__main__":
print(F"""{solution() = }""")
| 670 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase_ : Union[str, Any] = logging.getLogger()
lowerCamelCase_ : List[str] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _UpperCamelCase ( _A ):
'''simple docstring'''
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : Dict ):
os.makedirs(snake_case_ , exist_ok=snake_case_ )
UpperCamelCase_: int = {"""source""": """What is love ?""", """target""": """life"""}
UpperCamelCase_: Tuple = {"""train""": 12, """val""": 2, """test""": 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
UpperCamelCase_: Tuple = """\n""".join([contents[field]] * n_lines[split] )
with open(os.path.join(snake_case_ , f'''{split}.{field}''' ) , """w""" ) as f:
f.write(snake_case_ )
def lowerCAmelCase__ ( self : Dict , snake_case_ : int , snake_case_ : str = "pytorch" ):
UpperCamelCase_: Optional[Any] = self.get_auto_remove_tmp_dir()
UpperCamelCase_: Dict = os.path.join(snake_case_ , """output""" )
UpperCamelCase_: Any = os.path.join(snake_case_ , """data""" )
self._create_dummy_data(data_dir=snake_case_ )
UpperCamelCase_: Union[str, Any] = f'''
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
'''.split()
if gpus > 0:
testargs.append(f'''--gpus={gpus}''' )
if is_apex_available():
testargs.append("""--fp16""" )
else:
testargs.append("""--gpus=0""" )
testargs.append("""--distributed_backend=ddp_cpu""" )
testargs.append("""--num_processes=2""" )
UpperCamelCase_: Optional[Any] = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(snake_case_ , env=self.get_env() )
UpperCamelCase_: Optional[int] = os.path.join(snake_case_ , """metrics.json""" )
with open(snake_case_ ) as f:
UpperCamelCase_: Any = json.load(snake_case_ )
return result
@require_torch_gpu
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: List[str] = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: Any = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_gpu
@require_ray
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: List[Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
@require_ray
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: List[Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
| 670 | 1 |
def add( first: int , second: int ) -> int:
# Add two integers using only bitwise operations.
while second != 0:
c = first & second  # carry bits: positions where both operands have a 1
first ^= second  # partial sum without the carry
second = c << 1  # shift the carry into place for the next round
return first
if __name__ == "__main__":
import doctest
doctest.testmod()
first = int(input("""Enter the first number: """).strip())
second = int(input("""Enter the second number: """).strip())
print(F"""{add(first, second) = }""")
| 670 |
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : List[str] , snake_case_ : int , snake_case_ : Optional[Any]=None , snake_case_ : List[str]=None ):
UpperCamelCase_: List[Any] = data
UpperCamelCase_: List[Any] = previous
UpperCamelCase_: Tuple = next_node
def __str__( self : Dict ):
return f'''{self.data}'''
def lowerCAmelCase__ ( self : List[str] ):
return self.data
def lowerCAmelCase__ ( self : Any ):
return self.next
def lowerCAmelCase__ ( self : List[str] ):
return self.previous
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , snake_case_ : int ):
UpperCamelCase_: Union[str, Any] = head
def __iter__( self : Union[str, Any] ):
return self
def lowerCAmelCase__ ( self : Union[str, Any] ):
if not self.current:
raise StopIteration
else:
UpperCamelCase_: Dict = self.current.get_data()
UpperCamelCase_: Tuple = self.current.get_next()
return value
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : int ):
UpperCamelCase_: Optional[int] = None # First node in list
UpperCamelCase_: Dict = None # Last node in list
def __str__( self : Tuple ):
UpperCamelCase_: int = self.head
UpperCamelCase_: Tuple = []
while current is not None:
nodes.append(current.get_data() )
UpperCamelCase_: List[str] = current.get_next()
return " ".join(str(snake_case_ ) for node in nodes )
def __contains__( self : int , snake_case_ : int ):
UpperCamelCase_: Optional[Any] = self.head
while current:
if current.get_data() == value:
return True
UpperCamelCase_: Any = current.get_next()
return False
def __iter__( self : Any ):
return LinkedListIterator(self.head )
def lowerCAmelCase__ ( self : Tuple ):
if self.head:
return self.head.get_data()
return None
def lowerCAmelCase__ ( self : Optional[Any] ):
if self.tail:
return self.tail.get_data()
return None
def lowerCAmelCase__ ( self : Optional[int] , snake_case_ : Node ):
if self.head is None:
UpperCamelCase_: Tuple = node
UpperCamelCase_: Optional[int] = node
else:
self.insert_before_node(self.head , snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : Node ):
if self.head is None:
self.set_head(snake_case_ )
else:
self.insert_after_node(self.tail , snake_case_ )
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : int ):
UpperCamelCase_: Any = Node(snake_case_ )
if self.head is None:
self.set_head(snake_case_ )
else:
self.set_tail(snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : Node , snake_case_ : Node ):
UpperCamelCase_: str = node
UpperCamelCase_: int = node.previous
if node.get_previous() is None:
UpperCamelCase_: int = node_to_insert
else:
UpperCamelCase_: Dict = node_to_insert
UpperCamelCase_: int = node_to_insert
def lowerCAmelCase__ ( self : Dict , snake_case_ : Node , snake_case_ : Node ):
UpperCamelCase_: Tuple = node
UpperCamelCase_: Dict = node.next
if node.get_next() is None:
UpperCamelCase_: Union[str, Any] = node_to_insert
else:
UpperCamelCase_: str = node_to_insert
UpperCamelCase_: int = node_to_insert
def lowerCAmelCase__ ( self : Tuple , snake_case_ : int , snake_case_ : int ):
UpperCamelCase_: Union[str, Any] = 1
UpperCamelCase_: List[str] = Node(snake_case_ )
UpperCamelCase_: Optional[Any] = self.head
while node:
if current_position == position:
self.insert_before_node(snake_case_ , snake_case_ )
return
current_position += 1
UpperCamelCase_: Dict = node.next
self.insert_after_node(self.tail , snake_case_ )
def lowerCAmelCase__ ( self : int , snake_case_ : int ):
UpperCamelCase_: Union[str, Any] = self.head
while node:
if node.get_data() == item:
return node
UpperCamelCase_: List[Any] = node.get_next()
raise Exception("""Node not found""" )
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : List[str] ):
if (node := self.get_node(snake_case_ )) is not None:
if node == self.head:
UpperCamelCase_: Optional[int] = self.head.get_next()
if node == self.tail:
UpperCamelCase_: Union[str, Any] = self.tail.get_previous()
self.remove_node_pointers(snake_case_ )
@staticmethod
def lowerCAmelCase__ ( snake_case_ : Node ):
if node.get_next():
UpperCamelCase_: str = node.previous
if node.get_previous():
UpperCamelCase_: int = node.next
UpperCamelCase_: List[str] = None
UpperCamelCase_: int = None
def lowerCAmelCase__ ( self : str ):
return self.head is None
def A__ ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
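# Usage sketch, assuming the list class is named LinkedList and the method names
# match those referenced inside the class bodies above (set_head, set_tail,
# insert_after_node):
#   dll = LinkedList()
#   dll.set_head(Node(1))
#   dll.set_tail(Node(3))
#   dll.insert_after_node(dll.head, Node(2))
#   print(dll)  # -> 1 2 3
#   assert 2 in dll  # via __contains__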
| 670 | 1 |
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
lowerCamelCase_ : Union[str, Any] = """src/transformers"""
lowerCamelCase_ : Union[str, Any] = """docs/source/en"""
lowerCamelCase_ : Union[str, Any] = """."""
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Union[str, Any]:
with open(lowerCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
UpperCamelCase_: Tuple = f.readlines()
# Find the start prompt.
UpperCamelCase_: Tuple = 0
while not lines[start_index].startswith(lowerCamelCase ):
start_index += 1
start_index += 1
UpperCamelCase_: Dict = start_index
while not lines[end_index].startswith(lowerCamelCase ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
lowerCamelCase_ : Any = """Model|Encoder|Decoder|ForConditionalGeneration"""
# Regexes that match TF/Flax/PT model names.
lowerCamelCase_ : str = re.compile(r"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
lowerCamelCase_ : Optional[int] = re.compile(r"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
lowerCamelCase_ : Any = re.compile(r"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# This is to make sure the transformers module imported is the one in the repo.
lowerCamelCase_ : Optional[int] = direct_transformers_import(TRANSFORMERS_PATH)
def A__ ( lowerCamelCase ) -> Any:
UpperCamelCase_: Optional[int] = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , lowerCamelCase )
return [m.group(0 ) for m in matches]
def A__ ( lowerCamelCase , lowerCamelCase ) -> List[Any]:
UpperCamelCase_: List[str] = 2 if text == """✅""" or text == """❌""" else len(lowerCamelCase )
UpperCamelCase_: int = (width - text_length) // 2
UpperCamelCase_: List[str] = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def A__ ( ) -> Tuple:
UpperCamelCase_: Dict = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
UpperCamelCase_: Any = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
UpperCamelCase_: Optional[int] = {name: config.replace("""Config""" , """""" ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
UpperCamelCase_: List[Any] = collections.defaultdict(lowerCamelCase )
UpperCamelCase_: List[str] = collections.defaultdict(lowerCamelCase )
UpperCamelCase_: List[str] = collections.defaultdict(lowerCamelCase )
UpperCamelCase_: List[str] = collections.defaultdict(lowerCamelCase )
UpperCamelCase_: List[str] = collections.defaultdict(lowerCamelCase )
# Let's lookup through all transformers object (once).
for attr_name in dir(lowerCamelCase ):
UpperCamelCase_: int = None
if attr_name.endswith("""Tokenizer""" ):
UpperCamelCase_: int = slow_tokenizers
UpperCamelCase_: Dict = attr_name[:-9]
elif attr_name.endswith("""TokenizerFast""" ):
UpperCamelCase_: Union[str, Any] = fast_tokenizers
UpperCamelCase_: str = attr_name[:-13]
elif _re_tf_models.match(lowerCamelCase ) is not None:
UpperCamelCase_: Optional[Any] = tf_models
UpperCamelCase_: List[str] = _re_tf_models.match(lowerCamelCase ).groups()[0]
elif _re_flax_models.match(lowerCamelCase ) is not None:
UpperCamelCase_: List[str] = flax_models
UpperCamelCase_: List[Any] = _re_flax_models.match(lowerCamelCase ).groups()[0]
elif _re_pt_models.match(lowerCamelCase ) is not None:
UpperCamelCase_: str = pt_models
UpperCamelCase_: int = _re_pt_models.match(lowerCamelCase ).groups()[0]
if lookup_dict is not None:
while len(lowerCamelCase ) > 0:
if attr_name in model_name_to_prefix.values():
UpperCamelCase_: List[Any] = True
break
# Try again after removing the last word in the name
UpperCamelCase_: str = """""".join(camel_case_split(lowerCamelCase )[:-1] )
# Let's build that table!
UpperCamelCase_: Optional[int] = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
UpperCamelCase_: List[Any] = ["""Model""", """Tokenizer slow""", """Tokenizer fast""", """PyTorch support""", """TensorFlow support""", """Flax Support"""]
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
UpperCamelCase_: Tuple = [len(lowerCamelCase ) + 2 for c in columns]
UpperCamelCase_: List[Any] = max([len(lowerCamelCase ) for name in model_names] ) + 2
# Build the table per se
UpperCamelCase_: int = """|""" + """|""".join([_center_text(lowerCamelCase , lowerCamelCase ) for c, w in zip(lowerCamelCase , lowerCamelCase )] ) + """|\n"""
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([""":""" + """-""" * (w - 2) + """:""" for w in widths] ) + "|\n"
UpperCamelCase_: Optional[Any] = {True: """✅""", False: """❌"""}
for name in model_names:
UpperCamelCase_: int = model_name_to_prefix[name]
UpperCamelCase_: str = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(lowerCamelCase , lowerCamelCase ) for l, w in zip(lowerCamelCase , lowerCamelCase )] ) + "|\n"
return table
def A__ ( lowerCamelCase=False ) -> Dict:
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: Dict = _find_text_in_file(
filename=os.path.join(lowerCamelCase , """index.md""" ) , start_prompt="""<!--This table is updated automatically from the auto modules""" , end_prompt="""<!-- End table-->""" , )
UpperCamelCase_: Tuple = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(lowerCamelCase , """index.md""" ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
"""The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.""" )
if __name__ == "__main__":
lowerCamelCase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
lowerCamelCase_ : Dict = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 670 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase_ : List[str] = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[Any] = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 670 | 1 |
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs("""hub/hopper-medium-v2/unet/hor32""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/unet/hor128""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/value_function""", exist_ok=True)
def A__ ( lowerCamelCase ) -> List[Any]:
if hor == 1_28:
UpperCamelCase_: Dict = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
UpperCamelCase_: int = (32, 1_28, 2_56)
UpperCamelCase_: List[Any] = ("""UpResnetBlock1D""", """UpResnetBlock1D""")
elif hor == 32:
UpperCamelCase_: str = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
UpperCamelCase_: Any = (32, 64, 1_28, 2_56)
UpperCamelCase_: Any = ("""UpResnetBlock1D""", """UpResnetBlock1D""", """UpResnetBlock1D""")
UpperCamelCase_: Tuple = torch.load(F'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' )
UpperCamelCase_: int = model.state_dict()
UpperCamelCase_: Dict = {
"""down_block_types""": down_block_types,
"""block_out_channels""": block_out_channels,
"""up_block_types""": up_block_types,
"""layers_per_block""": 1,
"""use_timestep_embedding""": True,
"""out_block_type""": """OutConv1DBlock""",
"""norm_num_groups""": 8,
"""downsample_each_block""": False,
"""in_channels""": 14,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""sample_size""": 6_55_36,
"""mid_block_type""": """MidResTemporalBlock1D""",
"""act_fn""": """mish""",
}
UpperCamelCase_: Tuple = UNetaDModel(**lowerCamelCase )
print(F'''length of state dict: {len(state_dict.keys() )}''' )
print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
UpperCamelCase_: List[Any] = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
UpperCamelCase_: Dict = state_dict.pop(lowerCamelCase )
hf_value_function.load_state_dict(lowerCamelCase )
torch.save(hf_value_function.state_dict() , F'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' )
with open(F'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , """w""" ) as f:
json.dump(lowerCamelCase , lowerCamelCase )
def A__ ( ) -> Any:
UpperCamelCase_: Optional[Any] = {
"""in_channels""": 14,
"""down_block_types""": ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D"""),
"""up_block_types""": (),
"""out_block_type""": """ValueFunction""",
"""mid_block_type""": """ValueFunctionMidBlock1D""",
"""block_out_channels""": (32, 64, 1_28, 2_56),
"""layers_per_block""": 1,
"""downsample_each_block""": True,
"""sample_size""": 6_55_36,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""use_timestep_embedding""": True,
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""norm_num_groups""": 8,
"""act_fn""": """mish""",
}
UpperCamelCase_: Any = torch.load("""/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch""" )
UpperCamelCase_: Tuple = model
UpperCamelCase_: List[Any] = UNetaDModel(**lowerCamelCase )
print(F'''length of state dict: {len(state_dict.keys() )}''' )
print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
UpperCamelCase_: str = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
UpperCamelCase_: Union[str, Any] = state_dict.pop(lowerCamelCase )
hf_value_function.load_state_dict(lowerCamelCase )
torch.save(hf_value_function.state_dict() , """hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin""" )
with open("""hub/hopper-medium-v2/value_function/config.json""" , """w""" ) as f:
json.dump(lowerCamelCase , lowerCamelCase )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 670 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self : int ):
torch.manual_seed(0 )
UpperCamelCase_: Any = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
@property
def lowerCAmelCase__ ( self : Union[str, Any] ):
torch.manual_seed(0 )
UpperCamelCase_: Union[str, Any] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , )
return model
@property
def lowerCAmelCase__ ( self : Any ):
torch.manual_seed(0 )
UpperCamelCase_: List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(snake_case_ )
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: Union[str, Any] = self.dummy_uncond_unet
UpperCamelCase_: Optional[Any] = DDIMScheduler()
UpperCamelCase_: List[str] = self.dummy_vq_model
UpperCamelCase_: List[Any] = LDMPipeline(unet=snake_case_ , vqvae=snake_case_ , scheduler=snake_case_ )
ldm.to(snake_case_ )
ldm.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: str = torch.manual_seed(0 )
UpperCamelCase_: int = ldm(generator=snake_case_ , num_inference_steps=2 , output_type="""numpy""" ).images
UpperCamelCase_: Dict = torch.manual_seed(0 )
UpperCamelCase_: str = ldm(generator=snake_case_ , num_inference_steps=2 , output_type="""numpy""" , return_dict=snake_case_ )[0]
UpperCamelCase_: List[Any] = image[0, -3:, -3:, -1]
UpperCamelCase_: Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase_: str = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
UpperCamelCase_: Optional[Any] = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: Dict = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
ldm.to(snake_case_ )
ldm.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: List[str] = torch.manual_seed(0 )
UpperCamelCase_: Optional[int] = ldm(generator=snake_case_ , num_inference_steps=5 , output_type="""numpy""" ).images
UpperCamelCase_: List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCamelCase_: List[str] = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] )
UpperCamelCase_: Dict = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 670 | 1 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Any , snake_case_ : str , snake_case_ : Dict=2 , snake_case_ : Optional[int]=True , snake_case_ : Tuple=False , snake_case_ : Union[str, Any]=10 , snake_case_ : int=3 , snake_case_ : Dict=32 * 4 , snake_case_ : str=32 * 6 , snake_case_ : Optional[int]=4 , snake_case_ : Tuple=32 , ):
UpperCamelCase_: Tuple = parent
UpperCamelCase_: Any = batch_size
UpperCamelCase_: str = is_training
UpperCamelCase_: str = use_auxiliary_loss
UpperCamelCase_: Tuple = num_queries
UpperCamelCase_: int = num_channels
UpperCamelCase_: Union[str, Any] = min_size
UpperCamelCase_: Dict = max_size
UpperCamelCase_: Dict = num_labels
UpperCamelCase_: Any = mask_feature_size
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: str = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
snake_case_ )
UpperCamelCase_: Optional[int] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=snake_case_ )
UpperCamelCase_: Tuple = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=snake_case_ ) > 0.5
).float()
UpperCamelCase_: Dict = (torch.rand((self.batch_size, self.num_labels) , device=snake_case_ ) > 0.5).long()
UpperCamelCase_: Any = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowerCAmelCase__ ( self : Optional[int] ):
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: List[str] = self.prepare_config_and_inputs()
UpperCamelCase_: int = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : List[str] , snake_case_ : Optional[Any] ):
UpperCamelCase_: Any = output.encoder_hidden_states
UpperCamelCase_: Any = output.pixel_decoder_hidden_states
UpperCamelCase_: Optional[int] = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(snake_case_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(snake_case_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(snake_case_ ) , config.decoder_config.decoder_layers )
def lowerCAmelCase__ ( self : Optional[int] , snake_case_ : Union[str, Any] , snake_case_ : int , snake_case_ : Optional[Any] , snake_case_ : Union[str, Any]=False ):
with torch.no_grad():
UpperCamelCase_: int = MaskFormerModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase_: str = model(pixel_values=snake_case_ , pixel_mask=snake_case_ )
UpperCamelCase_: Any = model(snake_case_ , output_hidden_states=snake_case_ )
# the correct shape of output.transformer_decoder_hidden_states ensures the
# correctness of the encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : List[str] , snake_case_ : Tuple , snake_case_ : Any , snake_case_ : List[Any] , snake_case_ : List[str] , snake_case_ : Any ):
UpperCamelCase_: List[Any] = MaskFormerForInstanceSegmentation(config=snake_case_ )
model.to(snake_case_ )
model.eval()
def comm_check_on_output(snake_case_ : str ):
# check that all of the expected outputs are present
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
UpperCamelCase_: Any = model(pixel_values=snake_case_ , pixel_mask=snake_case_ )
UpperCamelCase_: int = model(snake_case_ )
comm_check_on_output(snake_case_ )
UpperCamelCase_: Optional[int] = model(
pixel_values=snake_case_ , pixel_mask=snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ )
comm_check_on_output(snake_case_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class _UpperCamelCase ( _A , _A , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : int = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
__UpperCamelCase : Optional[int] = (
{"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
__UpperCamelCase : Tuple = False
__UpperCamelCase : str = False
__UpperCamelCase : str = False
__UpperCamelCase : List[str] = False
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: List[Any] = MaskFormerModelTester(self )
UpperCamelCase_: Optional[Any] = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_, UpperCamelCase_: str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(snake_case_ , **snake_case_ , output_hidden_states=snake_case_ )
def lowerCAmelCase__ ( self : List[Any] ):
UpperCamelCase_: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*snake_case_ )
@unittest.skip(reason="""MaskFormer does not use inputs_embeds""" )
def lowerCAmelCase__ ( self : Dict ):
pass
@unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" )
def lowerCAmelCase__ ( self : Tuple ):
pass
@unittest.skip(reason="""MaskFormer is not a generative model""" )
def lowerCAmelCase__ ( self : Any ):
pass
@unittest.skip(reason="""MaskFormer does not use token embeddings""" )
def lowerCAmelCase__ ( self : int ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def lowerCAmelCase__ ( self : List[str] ):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase__ ( self : int ):
pass
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_, UpperCamelCase_: Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_: Any = model_class(snake_case_ )
UpperCamelCase_: Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_: Any = [*signature.parameters.keys()]
UpperCamelCase_: Optional[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case_ )
@slow
def lowerCAmelCase__ ( self : Optional[Any] ):
for model_name in ["facebook/maskformer-swin-small-coco"]:
UpperCamelCase_: int = MaskFormerModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: int = (self.model_tester.min_size,) * 2
UpperCamelCase_: Union[str, Any] = {
"""pixel_values""": torch.randn((2, 3, *size) , device=snake_case_ ),
"""mask_labels""": torch.randn((2, 10, *size) , device=snake_case_ ),
"""class_labels""": torch.zeros(2 , 10 , device=snake_case_ ).long(),
}
UpperCamelCase_: Dict = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(snake_case_ )
UpperCamelCase_: List[Any] = model(**snake_case_ )
self.assertTrue(outputs.loss is not None )
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_, UpperCamelCase_: Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(snake_case_ , **snake_case_ , output_hidden_states=snake_case_ )
def lowerCAmelCase__ ( self : List[Any] ):
UpperCamelCase_, UpperCamelCase_: int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_: Optional[Any] = model_class(snake_case_ ).to(snake_case_ )
UpperCamelCase_: str = model(**snake_case_ , output_attentions=snake_case_ )
self.assertTrue(outputs.attentions is not None )
def lowerCAmelCase__ ( self : Union[str, Any] ):
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
UpperCamelCase_: Optional[Any] = self.all_model_classes[1]
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: Tuple = self.model_tester.prepare_config_and_inputs()
UpperCamelCase_: Any = model_class(snake_case_ )
model.to(snake_case_ )
model.train()
UpperCamelCase_: Optional[Any] = model(snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ ).loss
loss.backward()
def lowerCAmelCase__ ( self : List[str] ):
# only MaskFormerForInstanceSegmentation has the loss
UpperCamelCase_: int = self.all_model_classes[1]
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: Any = self.model_tester.prepare_config_and_inputs()
UpperCamelCase_: Any = True
UpperCamelCase_: List[str] = True
UpperCamelCase_: Optional[int] = model_class(snake_case_ )
model.to(snake_case_ )
model.train()
UpperCamelCase_: Optional[Any] = model(snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ )
UpperCamelCase_: List[Any] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
UpperCamelCase_: int = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# unlike the original implementation, requires_grad=True is set on inputs_embeds (line 2152), so the gradient can be retained here
UpperCamelCase_: str = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
UpperCamelCase_: Tuple = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=snake_case_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowerCamelCase_ : List[str] = 1E-4
def A__ ( ) -> Tuple:
UpperCamelCase_: List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase__ ( self : int ):
return (
MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" )
if is_vision_available()
else None
)
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: List[str] = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(snake_case_ )
UpperCamelCase_: Any = self.default_image_processor
UpperCamelCase_: Any = prepare_img()
UpperCamelCase_: Any = image_processor(snake_case_ , return_tensors="""pt""" ).to(snake_case_ )
UpperCamelCase_: Optional[Any] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(snake_case_ , (1, 3, 800, 1088) )
with torch.no_grad():
UpperCamelCase_: Optional[Any] = model(**snake_case_ )
UpperCamelCase_: Dict = torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(snake_case_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
UpperCamelCase_: Tuple = torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(snake_case_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
UpperCamelCase_: Dict = torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(snake_case_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: str = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(snake_case_ )
.eval()
)
UpperCamelCase_: Tuple = self.default_image_processor
UpperCamelCase_: Tuple = prepare_img()
UpperCamelCase_: Dict = image_processor(snake_case_ , return_tensors="""pt""" ).to(snake_case_ )
UpperCamelCase_: str = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(snake_case_ , (1, 3, 800, 1088) )
with torch.no_grad():
UpperCamelCase_: int = model(**snake_case_ )
# masks_queries_logits
UpperCamelCase_: List[Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
UpperCamelCase_: str = [
[-1.3737124, -1.7724937, -1.9364233],
[-1.5977281, -1.9867939, -2.1523695],
[-1.5795398, -1.9269832, -2.093942],
]
UpperCamelCase_: List[str] = torch.tensor(snake_case_ ).to(snake_case_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
# class_queries_logits
UpperCamelCase_: Optional[Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
UpperCamelCase_: Union[str, Any] = torch.tensor(
[
[1.6512e00, -5.2572e00, -3.3519e00],
[3.6169e-02, -5.9025e00, -2.9313e00],
[1.0766e-04, -7.7630e00, -5.1263e00],
] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: Dict = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" )
.to(snake_case_ )
.eval()
)
UpperCamelCase_: str = self.default_image_processor
UpperCamelCase_: List[Any] = prepare_img()
UpperCamelCase_: Union[str, Any] = image_processor(snake_case_ , return_tensors="""pt""" ).to(snake_case_ )
UpperCamelCase_: Tuple = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(snake_case_ , (1, 3, 800, 1088) )
with torch.no_grad():
UpperCamelCase_: Optional[Any] = model(**snake_case_ )
# masks_queries_logits
UpperCamelCase_: Optional[Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
UpperCamelCase_: Tuple = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
UpperCamelCase_: Dict = torch.tensor(snake_case_ ).to(snake_case_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
# class_queries_logits
UpperCamelCase_: Any = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
UpperCamelCase_: List[Any] = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: Dict = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(snake_case_ )
.eval()
)
UpperCamelCase_: Optional[Any] = self.default_image_processor
UpperCamelCase_: Any = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="""pt""" , )
UpperCamelCase_: Any = inputs["""pixel_values"""].to(snake_case_ )
UpperCamelCase_: Any = [el.to(snake_case_ ) for el in inputs["""mask_labels"""]]
UpperCamelCase_: Union[str, Any] = [el.to(snake_case_ ) for el in inputs["""class_labels"""]]
with torch.no_grad():
UpperCamelCase_: str = model(**snake_case_ )
self.assertTrue(outputs.loss is not None )
| 670 |
def A__ ( lowerCamelCase = 50 ) -> int:
UpperCamelCase_: List[Any] = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
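# Hedged sanity check (hand-worked; not part of the original script). A row of
# length 3 admits two placements of a single length-2 tile, one placement of a
# length-3 tile and none of length 4, so the count for length 3 is 3. The call
# uses the obfuscated name A__, since that is what the def above actually binds.
assert A__(3) == 3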
if __name__ == "__main__":
print(F"""{solution() = }""")
| 670 | 1 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
lowerCamelCase_ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class _UpperCamelCase ( _A ):
'''simple docstring'''
def __init__( self : str , snake_case_ : CLIPSegForImageSegmentation , snake_case_ : CLIPSegProcessor , snake_case_ : AutoencoderKL , snake_case_ : CLIPTextModel , snake_case_ : CLIPTokenizer , snake_case_ : UNetaDConditionModel , snake_case_ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , snake_case_ : StableDiffusionSafetyChecker , snake_case_ : CLIPImageProcessor , ):
super().__init__()
if hasattr(scheduler.config , """steps_offset""" ) and scheduler.config.steps_offset != 1:
UpperCamelCase_: Dict = (
f'''The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'''
f''' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '''
"""to update the config accordingly as leaving `steps_offset` might led to incorrect results"""
""" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"""
""" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"""
""" file"""
)
deprecate("""steps_offset!=1""" , """1.0.0""" , snake_case_ , standard_warn=snake_case_ )
UpperCamelCase_: Union[str, Any] = dict(scheduler.config )
UpperCamelCase_: List[str] = 1
UpperCamelCase_: Any = FrozenDict(snake_case_ )
if hasattr(scheduler.config , """skip_prk_steps""" ) and scheduler.config.skip_prk_steps is False:
UpperCamelCase_: Union[str, Any] = (
f'''The configuration file of this scheduler: {scheduler} has not set the configuration'''
""" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"""
""" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"""
""" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"""
""" Hub, it would be very nice if you could open a Pull request for the"""
""" `scheduler/scheduler_config.json` file"""
)
deprecate("""skip_prk_steps not set""" , """1.0.0""" , snake_case_ , standard_warn=snake_case_ )
UpperCamelCase_: List[Any] = dict(scheduler.config )
UpperCamelCase_: Optional[int] = True
UpperCamelCase_: Dict = FrozenDict(snake_case_ )
if safety_checker is None:
logger.warning(
f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
self.register_modules(
segmentation_model=snake_case_ , segmentation_processor=snake_case_ , vae=snake_case_ , text_encoder=snake_case_ , tokenizer=snake_case_ , unet=snake_case_ , scheduler=snake_case_ , safety_checker=snake_case_ , feature_extractor=snake_case_ , )
def lowerCAmelCase__ ( self : List[str] , snake_case_ : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCamelCase_: List[str] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(snake_case_ )
def lowerCAmelCase__ ( self : Optional[int] ):
self.enable_attention_slicing(snake_case_ )
def lowerCAmelCase__ ( self : str ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
UpperCamelCase_: Dict = torch.device("""cuda""" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(snake_case_ , snake_case_ )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase__ ( self : Optional[int] ):
if self.device != torch.device("""meta""" ) or not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(snake_case_ , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self : int , snake_case_ : Union[str, List[str]] , snake_case_ : Union[torch.FloatTensor, PIL.Image.Image] , snake_case_ : str , snake_case_ : int = 512 , snake_case_ : int = 512 , snake_case_ : int = 50 , snake_case_ : float = 7.5 , snake_case_ : Optional[Union[str, List[str]]] = None , snake_case_ : Optional[int] = 1 , snake_case_ : float = 0.0 , snake_case_ : Optional[torch.Generator] = None , snake_case_ : Optional[torch.FloatTensor] = None , snake_case_ : Optional[str] = "pil" , snake_case_ : bool = True , snake_case_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , snake_case_ : int = 1 , **snake_case_ : List[Any] , ):
UpperCamelCase_: int = self.segmentation_processor(
text=[text] , images=[image] , padding="""max_length""" , return_tensors="""pt""" ).to(self.device )
UpperCamelCase_: List[str] = self.segmentation_model(**snake_case_ )
UpperCamelCase_: Optional[Any] = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
UpperCamelCase_: Union[str, Any] = self.numpy_to_pil(snake_case_ )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
UpperCamelCase_: Optional[Any] = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=snake_case_ , image=snake_case_ , mask_image=snake_case_ , height=snake_case_ , width=snake_case_ , num_inference_steps=snake_case_ , guidance_scale=snake_case_ , negative_prompt=snake_case_ , num_images_per_prompt=snake_case_ , eta=snake_case_ , generator=snake_case_ , latents=snake_case_ , output_type=snake_case_ , return_dict=snake_case_ , callback=snake_case_ , callback_steps=snake_case_ , )
| 670 |
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Union[str, Any]:
# Initialise PyTorch model
UpperCamelCase_: List[Any] = TaConfig.from_json_file(lowerCamelCase )
print(F'''Building PyTorch model from configuration: {config}''' )
UpperCamelCase_: Any = TaForConditionalGeneration(lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_ta(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
lowerCamelCase_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCamelCase_ : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
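# Hedged usage sketch: the flags match the argparse definitions above, but the
# script filename and paths are hypothetical placeholders, not values from the
# original script.
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /tmp/t5/model.ckpt \
#       --config_file /tmp/t5/config.json \
#       --pytorch_dump_path /tmp/t5-pytorch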
| 670 | 1 |
def A__ ( lowerCamelCase ) -> int:
UpperCamelCase_: Optional[Any] = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def A__ ( lowerCamelCase ) -> int:
UpperCamelCase_: Optional[Any] = 0
while number > 0:
UpperCamelCase_: Tuple = number % 10
sum_of_digits += last_digit
UpperCamelCase_: Union[str, Any] = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def A__ ( lowerCamelCase = 1_00 ) -> int:
UpperCamelCase_: List[str] = factorial(lowerCamelCase )
UpperCamelCase_: Union[str, Any] = split_and_add(lowerCamelCase )
return result
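# Worked check of the arithmetic above (not part of the original file):
# 10! == 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27, so for input 10 the
# intended result is 27. The assert is self-contained and avoids the
# obfuscated function names.
assert sum(int(digit) for digit in str(3628800)) == 27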
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
| 670 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ : str = {
"""configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
"""tokenization_roformer""": ["""RoFormerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Union[str, Any] = ["""RoFormerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Any = [
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Dict = [
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[Any] = [
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
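# Hedged illustration of the _LazyModule pattern intended above. In the
# upstream transformers sources the proxy is installed with
# sys.modules[__name__] = _LazyModule(...); the plain-name assignment above is
# an obfuscation artifact. With the real package installed, a submodule is
# imported only when one of its attributes is first accessed:
import importlib
roformer = importlib.import_module("transformers.models.roformer")
_ = roformer.RoFormerConfig  # first attribute access triggers the real import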
| 670 | 1 |
import itertools
import string
from collections.abc import Generator, Iterable
def A__ ( lowerCamelCase , lowerCamelCase ) -> Generator[tuple[str, ...], None, None]:
UpperCamelCase_: Tuple = iter(lowerCamelCase )
while True:
UpperCamelCase_: Optional[int] = tuple(itertools.islice(lowerCamelCase , lowerCamelCase ) )
if not chunk:
return
yield chunk
def A__ ( lowerCamelCase ) -> str:
UpperCamelCase_: int = """""".join([c.upper() for c in dirty if c in string.ascii_letters] )
UpperCamelCase_: Tuple = """"""
if len(lowerCamelCase ) < 2:
return dirty
for i in range(len(lowerCamelCase ) - 1 ):
clean += dirty[i]
if dirty[i] == dirty[i + 1]:
clean += "X"
clean += dirty[-1]
if len(lowerCamelCase ) & 1:
clean += "X"
return clean
def A__ ( lowerCamelCase ) -> list[str]:
# I and J are used interchangeably to allow
# us to use a 5x5 table (25 letters)
UpperCamelCase_: Tuple = """ABCDEFGHIKLMNOPQRSTUVWXYZ"""
# we're using a list instead of a '2d' array because it makes the math
# for setting up the table and doing the actual encoding/decoding simpler
UpperCamelCase_: str = []
# copy key chars into the table if they are in `alphabet` ignoring duplicates
for char in key.upper():
if char not in table and char in alphabet:
table.append(lowerCamelCase )
# fill the rest of the table in with the remaining alphabet chars
for char in alphabet:
if char not in table:
table.append(lowerCamelCase )
return table
def A__ ( lowerCamelCase , lowerCamelCase ) -> str:
UpperCamelCase_: int = generate_table(lowerCamelCase )
UpperCamelCase_: Optional[Any] = prepare_input(lowerCamelCase )
UpperCamelCase_: int = """"""
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(lowerCamelCase , 2 ):
UpperCamelCase_, UpperCamelCase_: List[Any] = divmod(table.index(lowerCamelCase ) , 5 )
UpperCamelCase_, UpperCamelCase_: Any = divmod(table.index(lowerCamelCase ) , 5 )
if rowa == rowa:
ciphertext += table[rowa * 5 + (cola + 1) % 5]
ciphertext += table[rowa * 5 + (cola + 1) % 5]
elif cola == cola:
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
else: # rectangle
ciphertext += table[rowa * 5 + cola]
ciphertext += table[rowa * 5 + cola]
return ciphertext
def A__ ( lowerCamelCase , lowerCamelCase ) -> str:
UpperCamelCase_: Union[str, Any] = generate_table(lowerCamelCase )
UpperCamelCase_: Dict = """"""
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(lowerCamelCase , 2 ):
UpperCamelCase_, UpperCamelCase_: str = divmod(table.index(lowerCamelCase ) , 5 )
UpperCamelCase_, UpperCamelCase_: Optional[Any] = divmod(table.index(lowerCamelCase ) , 5 )
if rowa == rowa:
plaintext += table[rowa * 5 + (cola - 1) % 5]
plaintext += table[rowa * 5 + (cola - 1) % 5]
elif cola == cola:
plaintext += table[((rowa - 1) % 5) * 5 + cola]
plaintext += table[((rowa - 1) % 5) * 5 + cola]
else: # rectangle
plaintext += table[rowa * 5 + cola]
plaintext += table[rowa * 5 + cola]
return plaintext
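# Hedged, self-contained reference for the digraph rules implemented above
# (the table and the sample pair are assumptions for illustration; nothing here
# imports the obfuscated functions in this module).
table = list("ABCDEFGHIKLMNOPQRSTUVWXYZ")  # key-less 5x5 Playfair square
def encode_pair(a: str, b: str) -> str:
    row_a, col_a = divmod(table.index(a), 5)
    row_b, col_b = divmod(table.index(b), 5)
    if row_a == row_b:  # same row: take the letter to the right of each
        return table[row_a * 5 + (col_a + 1) % 5] + table[row_b * 5 + (col_b + 1) % 5]
    if col_a == col_b:  # same column: take the letter below each
        return table[((row_a + 1) % 5) * 5 + col_a] + table[((row_b + 1) % 5) * 5 + col_b]
    return table[row_a * 5 + col_b] + table[row_b * 5 + col_a]  # rectangle rule
assert encode_pair("H", "I") == "IK"  # H and I sit in the same row of the square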
| 670 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase = "x" , lowerCamelCase = 10**-10 , lowerCamelCase = 1 , ) -> complex:
UpperCamelCase_: Optional[Any] = symbols(lowerCamelCase )
UpperCamelCase_: int = lambdify(lowerCamelCase , lowerCamelCase )
UpperCamelCase_: Optional[Any] = lambdify(lowerCamelCase , diff(lowerCamelCase , lowerCamelCase ) )
UpperCamelCase_: Tuple = starting_point
while True:
if diff_function(lowerCamelCase ) != 0:
UpperCamelCase_: List[Any] = prev_guess - multiplicity * func(lowerCamelCase ) / diff_function(
lowerCamelCase )
else:
raise ZeroDivisionError("""Could not find root""" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
UpperCamelCase_: Any = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(F"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}""")
# Find value of e
print(
"""The root of log(y) - 1 = 0 is """,
F"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
"""The root of exp(x) - 1 = 0 is""",
F"""{newton_raphson('exp(x) - 1', 10, precision=0.005)}""",
)
# Find root of cos(x)
print(F"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 670 | 1 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : List[Any] = ["""image_processor""", """tokenizer"""]
__UpperCamelCase : Any = """Pix2StructImageProcessor"""
__UpperCamelCase : Union[str, Any] = ("""T5Tokenizer""", """T5TokenizerFast""")
def __init__( self : Tuple , snake_case_ : List[Any] , snake_case_ : Optional[Any] ):
UpperCamelCase_: Optional[Any] = False
super().__init__(snake_case_ , snake_case_ )
def __call__( self : Optional[Any] , snake_case_ : List[Any]=None , snake_case_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , snake_case_ : bool = True , snake_case_ : Union[bool, str, PaddingStrategy] = False , snake_case_ : Union[bool, str, TruncationStrategy] = None , snake_case_ : Optional[int] = None , snake_case_ : Optional[int] = 2048 , snake_case_ : int = 0 , snake_case_ : Optional[int] = None , snake_case_ : Optional[bool] = None , snake_case_ : bool = False , snake_case_ : bool = False , snake_case_ : bool = False , snake_case_ : bool = False , snake_case_ : bool = False , snake_case_ : bool = True , snake_case_ : Optional[Union[str, TensorType]] = None , **snake_case_ : Any , ):
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None and not self.image_processor.is_vqa:
UpperCamelCase_: str = self.tokenizer
UpperCamelCase_: Tuple = self.tokenizer(
text=snake_case_ , add_special_tokens=snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , stride=snake_case_ , pad_to_multiple_of=snake_case_ , return_attention_mask=snake_case_ , return_overflowing_tokens=snake_case_ , return_special_tokens_mask=snake_case_ , return_offsets_mapping=snake_case_ , return_token_type_ids=snake_case_ , return_length=snake_case_ , verbose=snake_case_ , return_tensors=snake_case_ , **snake_case_ , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
UpperCamelCase_: Tuple = self.image_processor(
snake_case_ , return_tensors=snake_case_ , max_patches=snake_case_ , **snake_case_ )
else:
# add pixel_values and bbox
UpperCamelCase_: Tuple = self.image_processor(
snake_case_ , return_tensors=snake_case_ , max_patches=snake_case_ , header_text=snake_case_ , **snake_case_ )
if text is not None and not self.image_processor.is_vqa:
UpperCamelCase_: str = self.tokenizer(
text=snake_case_ , add_special_tokens=snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , stride=snake_case_ , pad_to_multiple_of=snake_case_ , return_attention_mask=snake_case_ , return_overflowing_tokens=snake_case_ , return_special_tokens_mask=snake_case_ , return_offsets_mapping=snake_case_ , return_token_type_ids=snake_case_ , return_length=snake_case_ , verbose=snake_case_ , return_tensors=snake_case_ , **snake_case_ , )
if "attention_mask" in text_encoding:
UpperCamelCase_: Any = text_encoding.pop("""attention_mask""" )
if "input_ids" in text_encoding:
UpperCamelCase_: str = text_encoding.pop("""input_ids""" )
else:
UpperCamelCase_: int = None
if text_encoding is not None:
encoding_image_processor.update(snake_case_ )
return encoding_image_processor
def lowerCAmelCase__ ( self : List[Any] , *snake_case_ : Tuple , **snake_case_ : List[str] ):
return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ )
def lowerCAmelCase__ ( self : Dict , *snake_case_ : Optional[int] , **snake_case_ : List[Any] ):
return self.tokenizer.decode(*snake_case_ , **snake_case_ )
@property
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: Union[str, Any] = self.tokenizer.model_input_names
UpperCamelCase_: List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
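# Hedged usage sketch (the checkpoint name is an assumption, and the output
# keys in the comment are what the Pix2Struct image processor is expected to
# return, not values asserted by this module).
from PIL import Image
from transformers import Pix2StructProcessor
processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
image = Image.new("RGB", (256, 256), "white")
inputs = processor(images=image, return_tensors="pt", max_patches=512)
print(sorted(inputs.keys()))  # expected: ['attention_mask', 'flattened_patches']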
| 670 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ : Optional[Any] = {
"""configuration_distilbert""": [
"""DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""DistilBertConfig""",
"""DistilBertOnnxConfig""",
],
"""tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[Any] = ["""DistilBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[int] = [
"""DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DistilBertForMaskedLM""",
"""DistilBertForMultipleChoice""",
"""DistilBertForQuestionAnswering""",
"""DistilBertForSequenceClassification""",
"""DistilBertForTokenClassification""",
"""DistilBertModel""",
"""DistilBertPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[str] = [
"""TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDistilBertForMaskedLM""",
"""TFDistilBertForMultipleChoice""",
"""TFDistilBertForQuestionAnswering""",
"""TFDistilBertForSequenceClassification""",
"""TFDistilBertForTokenClassification""",
"""TFDistilBertMainLayer""",
"""TFDistilBertModel""",
"""TFDistilBertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Dict = [
"""FlaxDistilBertForMaskedLM""",
"""FlaxDistilBertForMultipleChoice""",
"""FlaxDistilBertForQuestionAnswering""",
"""FlaxDistilBertForSequenceClassification""",
"""FlaxDistilBertForTokenClassification""",
"""FlaxDistilBertModel""",
"""FlaxDistilBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 670 | 1 |
lowerCamelCase_ : Optional[Any] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
lowerCamelCase_ : Union[str, Any] = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCamelCase_ : Optional[Any] = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 670 |
from manim import *
class _UpperCamelCase ( _A ):
'''simple docstring'''
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: Dict = Rectangle(height=0.5 , width=0.5 )
UpperCamelCase_: Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCamelCase_: Tuple = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Tuple = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Union[str, Any] = VGroup(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Optional[Any] = Text("""CPU""" , font_size=24 )
UpperCamelCase_: int = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(snake_case_ )
UpperCamelCase_: Optional[int] = [mem.copy() for i in range(1 )]
UpperCamelCase_: Dict = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Optional[int] = Text("""GPU""" , font_size=24 )
UpperCamelCase_: Optional[int] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
gpu.align_to(snake_case_ , snake_case_ )
gpu.set_x(gpu.get_x() - 1 )
self.add(snake_case_ )
UpperCamelCase_: Dict = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Any = Text("""Model""" , font_size=24 )
UpperCamelCase_: Optional[Any] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
model.move_to([3, -1.0, 0] )
self.play(
Create(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) , )
UpperCamelCase_: List[Any] = MarkupText(
f'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' , font_size=24 , )
UpperCamelCase_: Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCamelCase_: Union[str, Any] = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case_ , run_time=2.5 ) , Write(snake_case_ ) , Write(snake_case_ ) )
self.add(snake_case_ )
UpperCamelCase_: Union[str, Any] = []
UpperCamelCase_: Union[str, Any] = []
UpperCamelCase_: Tuple = []
for i, rect in enumerate(snake_case_ ):
UpperCamelCase_: Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(snake_case_ , opacity=0.7 )
cpu_target.move_to(snake_case_ )
cpu_target.generate_target()
UpperCamelCase_: int = 0.46 / 4
UpperCamelCase_: Optional[int] = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=snake_case_ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=snake_case_ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=snake_case_ , buff=0.0 )
cpu_targs.append(snake_case_ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(snake_case_ ) )
second_animations.append(MoveToTarget(snake_case_ , run_time=1.5 ) )
self.play(*snake_case_ )
self.play(*snake_case_ )
self.wait()
| 670 | 1 |
import warnings
from ..trainer import Trainer
from ..utils import logging
lowerCamelCase_ : Dict = logging.get_logger(__name__)
class _UpperCamelCase ( _A ):
'''simple docstring'''
def __init__( self : List[str] , snake_case_ : Tuple=None , **snake_case_ : List[str] ):
warnings.warn(
"""`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
"""instead.""" , snake_case_ , )
super().__init__(args=snake_case_ , **snake_case_ )
| 670 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Union[str, Any] = """laion/clap-htsat-unfused"""
UpperCamelCase_: List[str] = tempfile.mkdtemp()
def lowerCAmelCase__ ( self : Tuple , **snake_case_ : Optional[Any] ):
return RobertaTokenizer.from_pretrained(self.checkpoint , **snake_case_ )
def lowerCAmelCase__ ( self : str , **snake_case_ : Any ):
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **snake_case_ )
def lowerCAmelCase__ ( self : Tuple ):
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: Union[str, Any] = self.get_tokenizer()
UpperCamelCase_: int = self.get_feature_extractor()
UpperCamelCase_: List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_: Any = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: Any = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_: Optional[int] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
UpperCamelCase_: Dict = self.get_feature_extractor(do_normalize=snake_case_ , padding_value=1.0 )
UpperCamelCase_: List[str] = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=snake_case_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: int = self.get_feature_extractor()
UpperCamelCase_: Optional[Any] = self.get_tokenizer()
UpperCamelCase_: Dict = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
UpperCamelCase_: Optional[Any] = floats_list((3, 1000) )
UpperCamelCase_: List[str] = feature_extractor(snake_case_ , return_tensors="""np""" )
UpperCamelCase_: int = processor(audios=snake_case_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: List[Any] = self.get_feature_extractor()
UpperCamelCase_: List[str] = self.get_tokenizer()
UpperCamelCase_: List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
UpperCamelCase_: Dict = """This is a test string"""
UpperCamelCase_: Tuple = processor(text=snake_case_ )
UpperCamelCase_: Optional[int] = tokenizer(snake_case_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: List[str] = self.get_feature_extractor()
UpperCamelCase_: Any = self.get_tokenizer()
UpperCamelCase_: Optional[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
UpperCamelCase_: str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase_: Tuple = processor.batch_decode(snake_case_ )
UpperCamelCase_: str = tokenizer.batch_decode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Any = self.get_feature_extractor()
UpperCamelCase_: str = self.get_tokenizer()
UpperCamelCase_: Optional[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
| 670 | 1 |
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class _UpperCamelCase ( _A , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Optional[int] = DownBlockaD # noqa F405
__UpperCamelCase : Union[str, Any] = """down"""
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: int = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(snake_case_ )
class _UpperCamelCase ( _A , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Optional[Any] = ResnetDownsampleBlockaD # noqa F405
__UpperCamelCase : Dict = """down"""
def lowerCAmelCase__ ( self : List[Any] ):
UpperCamelCase_: Optional[Any] = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(snake_case_ )
class _UpperCamelCase ( _A , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Tuple = AttnDownBlockaD # noqa F405
__UpperCamelCase : int = """down"""
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: int = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(snake_case_ )
class _UpperCamelCase ( _A , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Any = CrossAttnDownBlockaD # noqa F405
__UpperCamelCase : List[Any] = """down"""
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_, UpperCamelCase_: List[Any] = super().prepare_init_args_and_inputs_for_common()
UpperCamelCase_: List[Any] = 32
return init_dict, inputs_dict
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: List[str] = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(snake_case_ )
class _UpperCamelCase ( _A , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : str = SimpleCrossAttnDownBlockaD # noqa F405
__UpperCamelCase : Tuple = """down"""
@property
def lowerCAmelCase__ ( self : Optional[Any] ):
return super().get_dummy_input(include_encoder_hidden_states=snake_case_ )
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_, UpperCamelCase_: int = super().prepare_init_args_and_inputs_for_common()
UpperCamelCase_: Optional[int] = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: Optional[Any] = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(snake_case_ )
class _UpperCamelCase ( _A , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = SkipDownBlockaD # noqa F405
__UpperCamelCase : str = """down"""
@property
def lowerCAmelCase__ ( self : Tuple ):
return super().get_dummy_input(include_skip_sample=snake_case_ )
def lowerCAmelCase__ ( self : List[Any] ):
UpperCamelCase_: str = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(snake_case_ )
class _UpperCamelCase ( _A , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : int = AttnSkipDownBlockaD # noqa F405
__UpperCamelCase : Optional[int] = """down"""
@property
def lowerCAmelCase__ ( self : List[Any] ):
return super().get_dummy_input(include_skip_sample=snake_case_ )
def lowerCAmelCase__ ( self : List[Any] ):
UpperCamelCase_: int = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(snake_case_ )
class _UpperCamelCase ( _A , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Optional[int] = DownEncoderBlockaD # noqa F405
__UpperCamelCase : Any = """down"""
@property
def lowerCAmelCase__ ( self : Dict ):
return super().get_dummy_input(include_temb=snake_case_ )
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: int = {
"""in_channels""": 32,
"""out_channels""": 32,
}
UpperCamelCase_: Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Tuple = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(snake_case_ )
class _UpperCamelCase ( _A , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Tuple = AttnDownEncoderBlockaD # noqa F405
__UpperCamelCase : Optional[int] = """down"""
@property
def lowerCAmelCase__ ( self : Tuple ):
return super().get_dummy_input(include_temb=snake_case_ )
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: str = {
"""in_channels""": 32,
"""out_channels""": 32,
}
UpperCamelCase_: Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase__ ( self : List[Any] ):
UpperCamelCase_: List[Any] = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(snake_case_ )
class _UpperCamelCase ( _A , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : List[str] = UNetMidBlockaD # noqa F405
__UpperCamelCase : Optional[int] = """mid"""
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: Any = {
"""in_channels""": 32,
"""temb_channels""": 128,
}
UpperCamelCase_: List[str] = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: Union[str, Any] = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(snake_case_ )
class _UpperCamelCase ( _A , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : int = UNetMidBlockaDCrossAttn # noqa F405
__UpperCamelCase : str = """mid"""
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_, UpperCamelCase_: int = super().prepare_init_args_and_inputs_for_common()
UpperCamelCase_: Tuple = 32
return init_dict, inputs_dict
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: Tuple = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(snake_case_ )
class _UpperCamelCase ( _A , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = UNetMidBlockaDSimpleCrossAttn # noqa F405
__UpperCamelCase : int = """mid"""
@property
def lowerCAmelCase__ ( self : Optional[Any] ):
return super().get_dummy_input(include_encoder_hidden_states=snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_, UpperCamelCase_: List[Any] = super().prepare_init_args_and_inputs_for_common()
UpperCamelCase_: str = 32
return init_dict, inputs_dict
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: Optional[Any] = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(snake_case_ )
class UpBlock2DTests(_A, unittest.TestCase):
    block_class = UpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
        super().test_output(expected_slice)


class ResnetUpsampleBlock2DTests(_A, unittest.TestCase):
    block_class = ResnetUpsampleBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
        super().test_output(expected_slice)


class CrossAttnUpBlock2DTests(_A, unittest.TestCase):
    block_class = CrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
        super().test_output(expected_slice)


class SimpleCrossAttnUpBlock2DTests(_A, unittest.TestCase):
    block_class = SimpleCrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
        super().test_output(expected_slice)
class AttnUpBlock2DTests(_A, unittest.TestCase):
    block_class = AttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
        super().test_output(expected_slice)


class SkipUpBlock2DTests(_A, unittest.TestCase):
    block_class = SkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
        super().test_output(expected_slice)


class AttnSkipUpBlock2DTests(_A, unittest.TestCase):
    block_class = AttnSkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
        super().test_output(expected_slice)
class UpDecoderBlock2DTests(_A, unittest.TestCase):
    block_class = UpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        # decoder blocks take no timestep embedding
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
        super().test_output(expected_slice)


class AttnUpDecoderBlock2DTests(_A, unittest.TestCase):
    block_class = AttnUpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
        super().test_output(expected_slice)
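# All of the block test classes above follow the same mixin-driven pattern:
# build the block from `prepare_init_args_and_inputs_for_common`, run a forward
# pass on `dummy_input`, and compare a fixed slice of the output against
# `expected_slice`. A minimal sketch of the check the shared `_A` tester base
# presumably performs (an assumption — the real implementation is defined
# earlier in this module):
#
#     output = block(**inputs_dict)
#     if isinstance(output, tuple):
#         output = output[0]
#     output_slice = output[0, -1, -3:, -3:].flatten()
#     assert torch.allclose(output_slice, torch.tensor(expected_slice), atol=5e-3)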
| 670 |
import warnings

from ..trainer import Trainer
from ..utils import logging


logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
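# Usage sketch (hypothetical `model` / `training_args`; the import path is an
# assumption based on this module living under `transformers.sagemaker`):
# constructing the deprecated subclass should emit the FutureWarning above and
# otherwise behave exactly like `Trainer`.
#
#     import warnings
#     from transformers.sagemaker import SageMakerTrainer
#
#     with warnings.catch_warnings(record=True) as caught:
#         warnings.simplefilter("always")
#         trainer = SageMakerTrainer(model=model, args=training_args)
#     assert any(issubclass(w.category, FutureWarning) for w in caught)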
| 670 | 1 |
from collections import defaultdict


def dfs(start: int) -> int:
    """Return the size of the subtree rooted at `start`.

    A subtree of even size can be disconnected from its parent while keeping
    every resulting component even-sized, so such roots are recorded in `cuts`.
    """
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    """Count removable edges by running the subtree-size DFS from node 1."""
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9  # number of nodes, number of edges
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    # the whole tree is even-sized too, so the root itself lands in `cuts`;
    # subtract it to count only removable edges
    print(len(cuts) - 1)
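# Worked example for the hard-coded tree above: rooted at node 1, the subtree
# at 3 is {3, 4} (size 2) and the subtree at 6 is {6, 8, 9, 10} (size 4), so
# the edges 3-1 and 6-1 can be cut; node 1's own "subtree" is the whole
# 10-node tree, which is the entry subtracted at the end. The script prints 2.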
| 670 |
import argparse

import numpy as np
import torch

from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")


def load_weights(checkpoint, hf_model, config):
    # re-parameterize the HF model with weight norm so the `weight_g` /
    # `weight_v` tensors below have somewhere to go
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()
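# Note on the weight-norm bracketing above (explanatory comment, not part of
# the conversion logic): `apply_weight_norm` re-parameterizes each conv weight
# as weight_g * weight_v / ||weight_v||, creating the `weight_g` / `weight_v`
# attributes that the assignments target; `remove_weight_norm` then bakes the
# product back into a single `weight` tensor for inference.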
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    # stats.npy holds the spectrogram normalization statistics: row 0 is the
    # mean, row 1 the scale
    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    args = parser.parse_args()

    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
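# Example invocation (the flags are the ones defined above; the script name
# and local paths are hypothetical):
#
#     python convert_hifigan_checkpoint.py \
#         --checkpoint_path ./hifigan/generator.ckpt \
#         --stats_path ./hifigan/stats.npy \
#         --pytorch_dump_folder_path ./speecht5_hifigan \
#         --push_to_hub my-user/speecht5-hifigan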
| 670 | 1 |