| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (87 – 55.2k chars) | int64 (0 – 349) | string (135 – 49.1k chars) | int64 (0 – 349) | int64 (0 – 1) |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'vocab_file': 'vocab.json',
'tokenizer_config_file': 'tokenizer_config.json',
'merges_file': 'merges.txt',
}
lowerCAmelCase__ = {
'vocab_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
),
},
'tokenizer_config_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
),
},
'merges_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
),
},
}
lowerCAmelCase__ = '</w>'
lowerCAmelCase__ = '@@ '
def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] ):
_A : Optional[int] = set()
_A : Optional[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_A : List[Any] = char
return pairs
# Speech2Text2 has no max input length
lowerCAmelCase__ = {'facebook/s2t-wav2vec2-large-en-de': 10_24}
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
def __init__( self , __lowerCamelCase , __lowerCamelCase="<s>" , __lowerCamelCase="<pad>" , __lowerCamelCase="</s>" , __lowerCamelCase="<unk>" , __lowerCamelCase=False , __lowerCamelCase=None , **__lowerCamelCase , ) -> Optional[Any]:
super().__init__(
unk_token=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , pad_token=__lowerCamelCase , do_lower_case=__lowerCamelCase , **__lowerCamelCase , )
_A : Dict = do_lower_case
with open(__lowerCamelCase , encoding="utf-8") as vocab_handle:
_A : Optional[int] = json.load(__lowerCamelCase)
_A : Optional[Any] = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(F"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
_A : Optional[Any] = None
_A : Tuple = None
else:
with open(__lowerCamelCase , encoding="utf-8") as merges_handle:
_A : Optional[int] = merges_handle.read().split("\n")[:-1]
_A : Union[str, Any] = [tuple(merge.split()[:2]) for merge in merges]
_A : Optional[int] = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase))))
_A : List[Any] = {}
@property
def _lowerCamelCase ( self) -> int:
return len(self.decoder)
def _lowerCamelCase ( self) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder)
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
_A : Tuple = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
_A : int = get_pairs(__lowerCamelCase)
if not pairs:
return token
while True:
_A : Any = min(__lowerCamelCase , key=lambda __lowerCamelCase: self.bpe_ranks.get(__lowerCamelCase , float("inf")))
if bigram not in self.bpe_ranks:
break
_A , _A : Optional[int] = bigram
_A : int = []
_A : str = 0
while i < len(__lowerCamelCase):
try:
_A : str = word.index(__lowerCamelCase , __lowerCamelCase)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
_A : str = j
if word[i] == first and i < len(__lowerCamelCase) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
_A : List[str] = tuple(__lowerCamelCase)
_A : List[str] = new_word
if len(__lowerCamelCase) == 1:
break
else:
_A : List[Any] = get_pairs(__lowerCamelCase)
_A : Tuple = " ".join(__lowerCamelCase)
if word == "\n " + BPE_TOKEN_MERGES:
_A : List[str] = "\n" + BPE_TOKEN_MERGES
if word.endswith(__lowerCamelCase):
_A : int = word.replace(__lowerCamelCase , "")
_A : int = word.replace(" " , __lowerCamelCase)
_A : Union[str, Any] = word
return word
def _lowerCamelCase ( self , __lowerCamelCase) -> Optional[Any]:
if self.bpe_ranks is None:
raise ValueError(
"This tokenizer was instantiated without a `merges.txt` file, so"
" that it can only be used for decoding, not for encoding."
"Make sure to provide `merges.txt` file at instantiation to enable "
"encoding.")
if self.do_lower_case:
_A : List[Any] = text.lower()
_A : Optional[int] = text.split()
_A : List[str] = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(__lowerCamelCase).split(" ")))
return split_tokens
def _lowerCamelCase ( self , __lowerCamelCase) -> int:
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token))
def _lowerCamelCase ( self , __lowerCamelCase) -> str:
_A : List[str] = self.decoder.get(__lowerCamelCase , self.unk_token)
return result
def _lowerCamelCase ( self , __lowerCamelCase) -> str:
_A : str = " ".join(__lowerCamelCase)
# make sure @@ tokens are concatenated
_A : int = "".join(string.split(__lowerCamelCase))
return string
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
_A : Any = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
_A : Any = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
with open(__lowerCamelCase , "w" , encoding="utf-8") as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase) + "\n")
_A : Union[str, Any] = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(__lowerCamelCase , "w" , encoding="utf-8") as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase: kv[1]):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!")
_A : Optional[int] = token_index
writer.write(" ".join(__lowerCamelCase) + "\n")
index += 1
return (vocab_file, merges_file)
| code_codestyle: 11 |
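The `code` cell above is a Speech2Text2 BPE tokenizer, but the obfuscation has broken most of its bindings: values assigned to `_A` are later read through names such as `pairs`, `word`, `merges_file`, and `bigram` that were never defined, so the sample is not runnable as printed. For reference, here is a minimal runnable sketch of the core BPE step it implements (collect adjacent symbol pairs, then greedily apply the highest-priority merge). The `bpe_ranks` table maps a pair to its merge priority, as in the sample; the rest is a simplified reconstruction, not the exact Hugging Face implementation.

```python
def get_pairs(word: tuple) -> set:
    """Return the set of adjacent symbol pairs in a word (tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

def bpe_merge(word: tuple, bpe_ranks: dict) -> tuple:
    """Greedily apply the lowest-rank (highest-priority) merge until none apply."""
    pairs = get_pairs(word)
    while pairs:
        bigram = min(pairs, key=lambda p: bpe_ranks.get(p, float("inf")))
        if bigram not in bpe_ranks:
            break  # no learned merge applies to any remaining pair
        first, second = bigram
        new_word, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
                new_word.append(first + second)  # fuse the matched pair
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = tuple(new_word)
        if len(word) == 1:
            break
        pairs = get_pairs(word)
    return word

# Example: with one learned merge ("l", "o") -> "lo"
print(bpe_merge(("l", "o", "w"), {("l", "o"): 0}))  # ('lo', 'w')
```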
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {}
lowerCAmelCase__ = {}
lowerCAmelCase__ = {}
def _UpperCAmelCase (UpperCamelCase__ : type , UpperCamelCase__ : Optional[str] , UpperCamelCase__ : Optional[List[str]] = None , ):
_A : Union[str, Any] = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})" )
_A : Dict = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})" )
_A : Dict = format_type
def _UpperCAmelCase (UpperCamelCase__ : Exception , UpperCamelCase__ : Optional[str] , UpperCamelCase__ : Optional[List[str]] = None ):
_A : Union[str, Any] = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
_A : Union[str, Any] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
lowerCAmelCase__ = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
_register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
lowerCAmelCase__ = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
_register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
lowerCAmelCase__ = ValueError('JAX needs to be installed to be able to return JAX arrays.')
_register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def _UpperCAmelCase (UpperCamelCase__ : Optional[str] ):
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def _UpperCAmelCase (UpperCamelCase__ : Optional[str] , **UpperCamelCase__ : List[Any] ):
_A : List[str] = get_format_type_from_alias(UpperCamelCase__ )
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**UpperCamelCase__ )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}'" )
| style_context_codestyle: 11 | label: 1 |
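The `style_context` cell above is the `datasets` formatter registry: each formatter class registers under a canonical format type plus aliases, and backends whose dependency is missing register an error that is raised lazily when that type is requested. A stripped-down sketch of the same pattern follows; the function and class names here are illustrative placeholders, not the `datasets` API.

```python
from typing import Dict, List, Optional

_FORMATTERS: Dict[str, type] = {}
_ALIASES: Dict[str, str] = {}          # alias -> canonical format type
_UNAVAILABLE: Dict[str, Exception] = {}

def register_formatter(cls: type, format_type: str, aliases: Optional[List[str]] = None) -> None:
    _FORMATTERS[format_type] = cls
    for alias in set((aliases or []) + [format_type]):
        _ALIASES[alias] = format_type

def register_unavailable(error: Exception, format_type: str, aliases: Optional[List[str]] = None) -> None:
    for alias in set((aliases or []) + [format_type]):
        _UNAVAILABLE[alias] = error

def get_formatter(format_type: str, **kwargs):
    format_type = _ALIASES.get(format_type, format_type)   # resolve alias first
    if format_type in _FORMATTERS:
        return _FORMATTERS[format_type](**kwargs)
    if format_type in _UNAVAILABLE:
        raise _UNAVAILABLE[format_type]                    # deferred "needs to be installed" error
    raise ValueError(f"Unknown format type {format_type!r}; known: {sorted(_FORMATTERS)}")

class PythonFormatter:
    def __init__(self, **kwargs): ...

register_formatter(PythonFormatter, "python", aliases=["py"])
try:
    import torch  # noqa: F401
except ImportError:
    register_unavailable(ImportError("PyTorch is required for format_type='torch'."), "torch", aliases=["pt"])
```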
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
lowerCAmelCase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ["pixel_values"]
def __init__( self , __lowerCamelCase = True , __lowerCamelCase = None , __lowerCamelCase = PILImageResampling.BILINEAR , __lowerCamelCase = True , __lowerCamelCase = 1 / 2_5_5 , __lowerCamelCase = True , __lowerCamelCase = None , __lowerCamelCase = True , **__lowerCamelCase , ) -> None:
super().__init__(**__lowerCamelCase)
_A : Tuple = size if size is not None else {"shortest_edge": 2_2_4}
_A : Dict = get_size_dict(__lowerCamelCase , default_to_square=__lowerCamelCase)
_A : List[str] = crop_size if crop_size is not None else {"height": 2_5_6, "width": 2_5_6}
_A : Optional[int] = get_size_dict(__lowerCamelCase , param_name="crop_size")
_A : List[str] = do_resize
_A : Optional[int] = size
_A : List[str] = resample
_A : int = do_rescale
_A : Optional[int] = rescale_factor
_A : Optional[int] = do_center_crop
_A : int = crop_size
_A : Optional[int] = do_flip_channel_order
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = PIL.Image.BILINEAR , __lowerCamelCase = None , **__lowerCamelCase , ) -> np.ndarray:
_A : List[Any] = get_size_dict(__lowerCamelCase , default_to_square=__lowerCamelCase)
if "shortest_edge" not in size:
raise ValueError(F"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
_A : Union[str, Any] = get_resize_output_image_size(__lowerCamelCase , size=size["shortest_edge"] , default_to_square=__lowerCamelCase)
return resize(__lowerCamelCase , size=__lowerCamelCase , resample=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = None , **__lowerCamelCase , ) -> np.ndarray:
_A : int = get_size_dict(__lowerCamelCase)
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
return center_crop(__lowerCamelCase , size=(size["height"], size["width"]) , data_format=__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = None , **__lowerCamelCase , ) -> List[str]:
return rescale(__lowerCamelCase , scale=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> np.ndarray:
return flip_channel_order(__lowerCamelCase , data_format=__lowerCamelCase)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = ChannelDimension.FIRST , **__lowerCamelCase , ) -> PIL.Image.Image:
_A : Tuple = do_resize if do_resize is not None else self.do_resize
_A : Dict = resample if resample is not None else self.resample
_A : int = do_rescale if do_rescale is not None else self.do_rescale
_A : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_A : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
_A : Tuple = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
_A : List[Any] = size if size is not None else self.size
_A : int = get_size_dict(__lowerCamelCase , default_to_square=__lowerCamelCase)
_A : Optional[int] = crop_size if crop_size is not None else self.crop_size
_A : Any = get_size_dict(__lowerCamelCase , param_name="crop_size")
_A : int = make_list_of_images(__lowerCamelCase)
if not valid_images(__lowerCamelCase):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray.")
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True.")
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True.")
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True.")
# All transformations expect numpy arrays.
_A : str = [to_numpy_array(__lowerCamelCase) for image in images]
if do_resize:
_A : List[Any] = [self.resize(image=__lowerCamelCase , size=__lowerCamelCase , resample=__lowerCamelCase) for image in images]
if do_center_crop:
_A : Dict = [self.center_crop(image=__lowerCamelCase , size=__lowerCamelCase) for image in images]
if do_rescale:
_A : Dict = [self.rescale(image=__lowerCamelCase , scale=__lowerCamelCase) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
_A : int = [self.flip_channel_order(image=__lowerCamelCase) for image in images]
_A : Tuple = [to_channel_dimension_format(__lowerCamelCase , __lowerCamelCase) for image in images]
_A : List[str] = {"pixel_values": images}
return BatchFeature(data=__lowerCamelCase , tensor_type=__lowerCamelCase)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> int:
_A : Tuple = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__lowerCamelCase) != len(__lowerCamelCase):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits")
if is_torch_tensor(__lowerCamelCase):
_A : Tuple = target_sizes.numpy()
_A : Dict = []
for idx in range(len(__lowerCamelCase)):
_A : Union[str, Any] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode="bilinear" , align_corners=__lowerCamelCase)
_A : List[Any] = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(__lowerCamelCase)
else:
_A : List[Any] = logits.argmax(dim=1)
_A : str = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
| code_codestyle: 11 |
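The `code` cell above is an image processor that conditionally resizes, center-crops, rescales, and flips channel order (the pretrained checkpoints expect BGR rather than RGB) before batching into channels-first arrays. A numpy-only sketch of that conditional pipeline, with hypothetical helper names and the resize step omitted for brevity:

```python
import numpy as np

def center_crop(image: np.ndarray, height: int, width: int) -> np.ndarray:
    """Crop the spatial center of an HWC image."""
    h, w = image.shape[:2]
    top, left = (h - height) // 2, (w - width) // 2
    return image[top : top + height, left : left + width]

def preprocess(
    images: list,
    do_rescale: bool = True,
    rescale_factor: float = 1 / 255,
    do_center_crop: bool = True,
    crop_size: tuple = (256, 256),
    do_flip_channel_order: bool = True,  # pretrained weights may expect BGR
) -> np.ndarray:
    out = []
    for image in images:
        image = np.asarray(image, dtype=np.float32)
        if do_center_crop:
            image = center_crop(image, *crop_size)
        if do_rescale:
            image = image * rescale_factor
        if do_flip_channel_order:
            image = image[..., ::-1]                 # RGB -> BGR on the channel axis
        out.append(np.transpose(image, (2, 0, 1)))   # HWC -> CHW ("channels first")
    return np.stack(out)

batch = preprocess([np.zeros((300, 300, 3), dtype=np.uint8)])
print(batch.shape)  # (1, 3, 256, 256)
```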
def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] ):
# "extended trapezoidal rule"
# int(f) = dx/2 * (f1 + 2f2 + ... + fn)
_A : int = (boundary[1] - boundary[0]) / steps
_A : Any = boundary[0]
_A : List[Any] = boundary[1]
_A : str = make_points(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
_A : str = 0.0
y += (h / 2.0) * f(UpperCamelCase__ )
for i in x_i:
# print(i)
y += h * f(UpperCamelCase__ )
y += (h / 2.0) * f(UpperCamelCase__ )
return y
def _UpperCAmelCase (UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any ):
_A : Optional[int] = a + h
while x < (b - h):
yield x
_A : Dict = x + h
def _UpperCAmelCase (UpperCamelCase__ : Optional[int] ): # enter your function here
_A : Any = (x - 0) * (x - 0)
return y
def _UpperCAmelCase ():
_A : Optional[Any] = 0.0 # Lower bound of integration
_A : Optional[int] = 1.0 # Upper bound of integration
_A : List[Any] = 10.0 # define number of steps or resolution
_A : Any = [a, b] # define boundary of integration
_A : Tuple = method_a(UpperCamelCase__ , UpperCamelCase__ )
print(f"y = {y}" )
if __name__ == "__main__":
main()
| style_context_codestyle: 11 | label: 1 |
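The `style_context` cell above implements the extended trapezoidal rule, but the obfuscated names hide its structure. The rule approximates an integral as h/2 * (f(a) + 2 f(x_1) + ... + 2 f(x_{n-1}) + f(b)) with h = (b - a) / steps; a clean reconstruction:

```python
def trapezoidal(f, a: float, b: float, steps: int) -> float:
    """Extended trapezoidal rule: h/2 * (f(a) + 2*f(x_1) + ... + 2*f(x_{n-1}) + f(b))."""
    h = (b - a) / steps
    total = 0.5 * (f(a) + f(b))       # endpoints get half weight
    for i in range(1, steps):
        total += f(a + i * h)         # interior points get full weight
    return total * h

# integral of x^2 over [0, 1] is 1/3; with 10 steps the rule gives ~0.335
print(trapezoidal(lambda x: x * x, 0.0, 1.0, 10))
```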
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self) -> Optional[Any]:
_A : Union[str, Any] = {}
def _lowerCamelCase ( self) -> None:
print(self.vertex)
for i in self.vertex:
print(__lowerCamelCase , " -> " , " -> ".join([str(__lowerCamelCase) for j in self.vertex[i]]))
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase) -> None:
# check if the vertex is already present
if from_vertex in self.vertex:
self.vertex[from_vertex].append(__lowerCamelCase)
else:
# else make a new vertex
_A : Optional[Any] = [to_vertex]
def _lowerCamelCase ( self) -> None:
# visited array for storing already visited nodes
_A : List[Any] = [False] * len(self.vertex)
# call the recursive helper function
for i in range(len(self.vertex)):
if not visited[i]:
self.dfs_recursive(__lowerCamelCase , __lowerCamelCase)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase) -> None:
# mark start vertex as visited
_A : str = True
print(__lowerCamelCase , end=" ")
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(__lowerCamelCase , __lowerCamelCase)
if __name__ == "__main__":
lowerCAmelCase__ = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| code_codestyle: 11 |
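The `code` cell above is a directed-graph DFS, but the obfuscation dropped the subscript in the recursive step (the line `for i in self.vertex:` should iterate the adjacency list of the current vertex, not all vertices). A corrected, self-contained sketch, assuming vertices are labelled 0..n-1:

```python
class Graph:
    def __init__(self) -> None:
        self.vertex: dict[int, list[int]] = {}

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        self.vertex.setdefault(from_vertex, []).append(to_vertex)

    def dfs(self) -> list[int]:
        """Depth-first traversal covering every component; returns visit order."""
        visited = [False] * len(self.vertex)
        order: list[int] = []
        for start in range(len(self.vertex)):
            if not visited[start]:
                self._dfs_recursive(start, visited, order)
        return order

    def _dfs_recursive(self, vertex: int, visited: list, order: list) -> None:
        visited[vertex] = True
        order.append(vertex)
        for neighbour in self.vertex[vertex]:   # recurse into adjacent nodes only
            if not visited[neighbour]:
                self._dfs_recursive(neighbour, visited, order)

g = Graph()
for u, v in [(0, 1), (0, 2), (1, 2), (2, 0), (2, 3), (3, 3)]:
    g.add_edge(u, v)
print(g.dfs())  # [0, 1, 2, 3]
```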
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
@parameterized.expand([(None,), ("foo.json",)])
def _lowerCamelCase ( self , __lowerCamelCase) -> List[str]:
_A : str = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCamelCase , config_name=__lowerCamelCase)
_A : Tuple = GenerationConfig.from_pretrained(__lowerCamelCase , config_name=__lowerCamelCase)
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , __lowerCamelCase)
self.assertEqual(loaded_config.temperature , 0.7)
self.assertEqual(loaded_config.length_penalty , 1.0)
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]])
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 5_0)
self.assertEqual(loaded_config.max_length , 2_0)
self.assertEqual(loaded_config.max_time , __lowerCamelCase)
def _lowerCamelCase ( self) -> Optional[int]:
_A : Optional[int] = AutoConfig.from_pretrained("gpt2")
_A : int = GenerationConfig.from_model_config(__lowerCamelCase)
_A : List[Any] = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase)
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id)
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id)
def _lowerCamelCase ( self) -> Optional[Any]:
_A : Optional[Any] = GenerationConfig()
_A : List[Any] = {
"max_new_tokens": 1_0_2_4,
"foo": "bar",
}
_A : List[str] = copy.deepcopy(__lowerCamelCase)
_A : int = generation_config.update(**__lowerCamelCase)
# update_kwargs was not modified (no side effects)
self.assertEqual(__lowerCamelCase , __lowerCamelCase)
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_0_2_4)
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(__lowerCamelCase , {"foo": "bar"})
def _lowerCamelCase ( self) -> Any:
_A : int = GenerationConfig()
_A : int = "bar"
with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
generation_config.save_pretrained(__lowerCamelCase)
_A : Any = GenerationConfig.from_pretrained(__lowerCamelCase)
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , "bar")
_A : Optional[Any] = GenerationConfig.from_model_config(__lowerCamelCase)
assert not hasattr(__lowerCamelCase , "foo") # no new kwargs should be initialized if from config
def _lowerCamelCase ( self) -> List[str]:
_A : Union[str, Any] = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0)
self.assertEqual(default_config.do_sample , __lowerCamelCase)
self.assertEqual(default_config.num_beams , 1)
_A : Optional[int] = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7)
self.assertEqual(config.do_sample , __lowerCamelCase)
self.assertEqual(config.num_beams , 1)
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCamelCase)
_A : Optional[int] = GenerationConfig.from_pretrained(__lowerCamelCase , temperature=1.0)
self.assertEqual(loaded_config.temperature , 1.0)
self.assertEqual(loaded_config.do_sample , __lowerCamelCase)
self.assertEqual(loaded_config.num_beams , 1) # default value
@is_staging_test
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
@classmethod
def _lowerCamelCase ( cls) -> Optional[int]:
_A : Dict = TOKEN
HfFolder.save_token(__lowerCamelCase)
@classmethod
def _lowerCamelCase ( cls) -> List[Any]:
try:
delete_repo(token=cls._token , repo_id="test-generation-config")
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-generation-config-org")
except HTTPError:
pass
def _lowerCamelCase ( self) -> Any:
_A : Optional[int] = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("test-generation-config" , use_auth_token=self._token)
_A : Union[str, Any] = GenerationConfig.from_pretrained(F"{USER}/test-generation-config")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
# Reset repo
delete_repo(token=self._token , repo_id="test-generation-config")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__lowerCamelCase , repo_id="test-generation-config" , push_to_hub=__lowerCamelCase , use_auth_token=self._token)
_A : Optional[Any] = GenerationConfig.from_pretrained(F"{USER}/test-generation-config")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : Union[str, Any] = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("valid_org/test-generation-config-org" , use_auth_token=self._token)
_A : int = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-generation-config-org")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__lowerCamelCase , repo_id="valid_org/test-generation-config-org" , push_to_hub=__lowerCamelCase , use_auth_token=self._token)
_A : Optional[int] = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
| style_context_codestyle: 11 | label: 1 |
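The tests above exercise `GenerationConfig`'s save/load round-trip and its `.update()` contract: known keys mutate the config in place while unknown keys are returned to the caller. A minimal sketch of that contract, independent of transformers (the `Config` class and its default attributes are illustrative):

```python
import json
import os
import tempfile

class Config:
    def __init__(self, **kwargs):
        self.temperature = kwargs.pop("temperature", 1.0)
        self.do_sample = kwargs.pop("do_sample", False)
        self.num_beams = kwargs.pop("num_beams", 1)

    def update(self, **kwargs) -> dict:
        """Apply known attributes in place; return the kwargs that were not consumed."""
        unused = {}
        for key, value in kwargs.items():
            if hasattr(self, key):
                setattr(self, key, value)
            else:
                unused[key] = value
        return unused

    def save_pretrained(self, directory: str, name: str = "config.json") -> None:
        with open(os.path.join(directory, name), "w", encoding="utf-8") as f:
            json.dump(self.__dict__, f, indent=2, sort_keys=True)

    @classmethod
    def from_pretrained(cls, directory: str, name: str = "config.json") -> "Config":
        with open(os.path.join(directory, name), encoding="utf-8") as f:
            return cls(**json.load(f))

config = Config(temperature=0.7, do_sample=True)
assert config.update(temperature=0.9, foo="bar") == {"foo": "bar"}  # unused kwargs come back
with tempfile.TemporaryDirectory() as tmp:
    config.save_pretrained(tmp)
    assert Config.from_pretrained(tmp).temperature == 0.9
```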
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
lowerCAmelCase__ = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
lowerCAmelCase__ = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
lowerCAmelCase__ = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class lowerCAmelCase__ ( datasets.Metric):
'''simple docstring'''
def _lowerCamelCase ( self) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/krishnap25/mauve" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence"),
"references": datasets.Value("string" , id="sequence"),
}) , codebase_urls=["https://github.com/krishnap25/mauve"] , reference_urls=[
"https://arxiv.org/abs/2102.01454",
"https://github.com/krishnap25/mauve",
] , )
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="auto" , __lowerCamelCase=-1 , __lowerCamelCase=0.9 , __lowerCamelCase=5 , __lowerCamelCase=5_0_0 , __lowerCamelCase="gpt2-large" , __lowerCamelCase=-1 , __lowerCamelCase=1_0_2_4 , __lowerCamelCase=2_5 , __lowerCamelCase=5 , __lowerCamelCase=True , __lowerCamelCase=2_5 , ) -> Optional[Any]:
_A : Optional[int] = compute_mauve(
p_text=__lowerCamelCase , q_text=__lowerCamelCase , p_features=__lowerCamelCase , q_features=__lowerCamelCase , p_tokens=__lowerCamelCase , q_tokens=__lowerCamelCase , num_buckets=__lowerCamelCase , pca_max_data=__lowerCamelCase , kmeans_explained_var=__lowerCamelCase , kmeans_num_redo=__lowerCamelCase , kmeans_max_iter=__lowerCamelCase , featurize_model_name=__lowerCamelCase , device_id=__lowerCamelCase , max_text_length=__lowerCamelCase , divergence_curve_discretization_size=__lowerCamelCase , mauve_scaling_factor=__lowerCamelCase , verbose=__lowerCamelCase , seed=__lowerCamelCase , )
return out
| code_codestyle: 11 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=0.2 , __lowerCamelCase=0.2) -> str:
_A : Optional[int] = bp_numa
_A : Dict = bp_numa
_A : Tuple = bp_numa
_A : List[str] = conva_get[:2]
_A : Tuple = conva_get[2]
_A : Optional[int] = size_pa
_A : Optional[Any] = rate_w
_A : Optional[Any] = rate_t
_A : Union[str, Any] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0]) + 0.5)
for i in range(self.conva[1])
]
_A : int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
_A : Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
_A : Any = -2 * np.random.rand(self.conva[1]) + 1
_A : Optional[int] = -2 * np.random.rand(self.num_bpa) + 1
_A : Optional[Any] = -2 * np.random.rand(self.num_bpa) + 1
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
# save model dict with pickle
_A : Dict = {
"num_bp1": self.num_bpa,
"num_bp2": self.num_bpa,
"num_bp3": self.num_bpa,
"conv1": self.conva,
"step_conv1": self.step_conva,
"size_pooling1": self.size_poolinga,
"rate_weight": self.rate_weight,
"rate_thre": self.rate_thre,
"w_conv1": self.w_conva,
"wkj": self.wkj,
"vji": self.vji,
"thre_conv1": self.thre_conva,
"thre_bp2": self.thre_bpa,
"thre_bp3": self.thre_bpa,
}
with open(__lowerCamelCase , "wb") as f:
pickle.dump(__lowerCamelCase , __lowerCamelCase)
print(F"Model saved: {save_path}")
@classmethod
def _lowerCamelCase ( cls , __lowerCamelCase) -> Any:
# read saved model
with open(__lowerCamelCase , "rb") as f:
_A : Any = pickle.load(__lowerCamelCase) # noqa: S301
_A : Optional[int] = model_dic.get("conv1")
conv_get.append(model_dic.get("step_conv1"))
_A : str = model_dic.get("size_pooling1")
_A : List[str] = model_dic.get("num_bp1")
_A : Union[str, Any] = model_dic.get("num_bp2")
_A : List[Any] = model_dic.get("num_bp3")
_A : Dict = model_dic.get("rate_weight")
_A : List[Any] = model_dic.get("rate_thre")
# create model instance
_A : str = CNN(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
# modify model parameter
_A : List[Any] = model_dic.get("w_conv1")
_A : Union[str, Any] = model_dic.get("wkj")
_A : str = model_dic.get("vji")
_A : List[str] = model_dic.get("thre_conv1")
_A : Optional[Any] = model_dic.get("thre_bp2")
_A : Dict = model_dic.get("thre_bp3")
return conv_ins
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
return 1 / (1 + np.exp(-1 * x))
def _lowerCamelCase ( self , __lowerCamelCase) -> Union[str, Any]:
return round(__lowerCamelCase , 3)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Union[str, Any]:
# convolution process
_A : Tuple = convs[0]
_A : Union[str, Any] = convs[1]
_A : List[Any] = np.shape(__lowerCamelCase)[0]
# get the data slice of original image data, data_focus
_A : Tuple = []
for i_focus in range(0 , size_data - size_conv + 1 , __lowerCamelCase):
for j_focus in range(0 , size_data - size_conv + 1 , __lowerCamelCase):
_A : Optional[int] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(__lowerCamelCase)
# calculate the feature map of every single kernel, and saved as list of matrix
_A : Optional[Any] = []
_A : Optional[int] = int((size_data - size_conv) / conv_step + 1)
for i_map in range(__lowerCamelCase):
_A : Optional[int] = []
for i_focus in range(len(__lowerCamelCase)):
_A : Any = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map]))
- thre_convs[i_map]
)
featuremap.append(self.sig(__lowerCamelCase))
_A : Optional[Any] = np.asmatrix(__lowerCamelCase).reshape(
__lowerCamelCase , __lowerCamelCase)
data_featuremap.append(__lowerCamelCase)
# expanding the data slice to one dimension
_A : Optional[Any] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(__lowerCamelCase))
_A : Dict = np.asarray(__lowerCamelCase)
return focus_list, data_featuremap
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase="average_pool") -> Dict:
# pooling process
_A : Optional[Any] = len(featuremaps[0])
_A : str = int(size_map / size_pooling)
_A : Optional[int] = []
for i_map in range(len(__lowerCamelCase)):
_A : int = featuremaps[i_map]
_A : Optional[int] = []
for i_focus in range(0 , __lowerCamelCase , __lowerCamelCase):
for j_focus in range(0 , __lowerCamelCase , __lowerCamelCase):
_A : str = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(__lowerCamelCase))
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(__lowerCamelCase))
_A : Tuple = np.asmatrix(__lowerCamelCase).reshape(__lowerCamelCase , __lowerCamelCase)
featuremap_pooled.append(__lowerCamelCase)
return featuremap_pooled
def _lowerCamelCase ( self , __lowerCamelCase) -> Tuple:
# expanding three-dimensional data to a one-dimensional list
_A : Tuple = []
for i in range(len(__lowerCamelCase)):
_A : Union[str, Any] = np.shape(data[i])
_A : List[Any] = data[i].reshape(1 , shapes[0] * shapes[1])
_A : Optional[Any] = data_listed.getA().tolist()[0]
data_expanded.extend(__lowerCamelCase)
_A : Optional[Any] = np.asarray(__lowerCamelCase)
return data_expanded
def _lowerCamelCase ( self , __lowerCamelCase) -> Union[str, Any]:
# expanding a matrix to a one-dimensional list
_A : List[Any] = np.asarray(__lowerCamelCase)
_A : Union[str, Any] = np.shape(__lowerCamelCase)
_A : Dict = data_mat.reshape(1 , shapes[0] * shapes[1])
return data_expanded
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Optional[int]:
_A : Dict = []
_A : Any = 0
for i_map in range(__lowerCamelCase):
_A : Union[str, Any] = np.ones((size_map, size_map))
for i in range(0 , __lowerCamelCase , __lowerCamelCase):
for j in range(0 , __lowerCamelCase , __lowerCamelCase):
_A : List[Any] = pd_pool[
i_pool
]
_A : Tuple = i_pool + 1
_A : Optional[Any] = np.multiply(
__lowerCamelCase , np.multiply(out_map[i_map] , (1 - out_map[i_map])))
pd_all.append(__lowerCamelCase)
return pd_all
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=bool) -> Union[str, Any]:
# model training
print("----------------------Start Training-------------------------")
print((" - - Shape: Train_Data ", np.shape(__lowerCamelCase)))
print((" - - Shape: Teach_Data ", np.shape(__lowerCamelCase)))
_A : Tuple = 0
_A : Dict = []
_A : Optional[Any] = 1_0_0_0_0
while rp < n_repeat and mse >= error_accuracy:
_A : Union[str, Any] = 0
print(F"-------------Learning Time {rp}--------------")
for p in range(len(__lowerCamelCase)):
# print('------------Learning Image: %d--------------'%p)
_A : str = np.asmatrix(datas_train[p])
_A : Union[str, Any] = np.asarray(datas_teach[p])
_A , _A : Any = self.convolute(
__lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_A : Optional[Any] = self.pooling(__lowerCamelCase , self.size_poolinga)
_A : Optional[int] = np.shape(__lowerCamelCase)
_A : List[str] = self._expand(__lowerCamelCase)
_A : Tuple = data_bp_input
_A : int = np.dot(__lowerCamelCase , self.vji.T) - self.thre_bpa
_A : List[Any] = self.sig(__lowerCamelCase)
_A : Union[str, Any] = np.dot(__lowerCamelCase , self.wkj.T) - self.thre_bpa
_A : List[str] = self.sig(__lowerCamelCase)
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
_A : int = np.multiply(
(data_teach - bp_outa) , np.multiply(__lowerCamelCase , (1 - bp_outa)))
_A : Optional[Any] = np.multiply(
np.dot(__lowerCamelCase , self.wkj) , np.multiply(__lowerCamelCase , (1 - bp_outa)))
_A : Union[str, Any] = np.dot(__lowerCamelCase , self.vji)
_A : Any = pd_i_all / (self.size_poolinga * self.size_poolinga)
_A : Dict = pd_conva_pooled.T.getA().tolist()
_A : Optional[Any] = self._calculate_gradient_from_pool(
__lowerCamelCase , __lowerCamelCase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1]):
_A : int = self._expand_mat(pd_conva_all[k_conv])
_A : Optional[int] = self.rate_weight * np.dot(__lowerCamelCase , __lowerCamelCase)
_A : List[Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]))
_A : Any = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv]) * self.rate_thre
)
# all connected layer
_A : Tuple = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
_A : int = self.vji + pd_j_all.T * bp_outa * self.rate_weight
_A : Tuple = self.thre_bpa - pd_k_all * self.rate_thre
_A : List[str] = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the total error across all images
_A : Optional[int] = np.sum(abs(data_teach - bp_outa))
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
_A : Any = rp + 1
_A : Dict = error_count / patterns
all_mse.append(__lowerCamelCase)
def draw_error():
_A : Optional[int] = [error_accuracy for i in range(int(n_repeat * 1.2))]
plt.plot(__lowerCamelCase , "+-")
plt.plot(__lowerCamelCase , "r--")
plt.xlabel("Learning Times")
plt.ylabel("All_mse")
plt.grid(__lowerCamelCase , alpha=0.5)
plt.show()
print("------------------Training Complished---------------------")
print((" - - Training epoch: ", rp, F" - - Mse: {mse:.6f}"))
if draw_e:
draw_error()
return mse
def _lowerCamelCase ( self , __lowerCamelCase) -> int:
# model predict
_A : Union[str, Any] = []
print("-------------------Start Testing-------------------------")
print((" - - Shape: Test_Data ", np.shape(__lowerCamelCase)))
for p in range(len(__lowerCamelCase)):
_A : int = np.asmatrix(datas_test[p])
_A , _A : List[Any] = self.convolute(
__lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_A : str = self.pooling(__lowerCamelCase , self.size_poolinga)
_A : Optional[int] = self._expand(__lowerCamelCase)
_A : List[Any] = data_bp_input
_A : Optional[int] = bp_outa * self.vji.T - self.thre_bpa
_A : int = self.sig(__lowerCamelCase)
_A : int = bp_outa * self.wkj.T - self.thre_bpa
_A : Optional[int] = self.sig(__lowerCamelCase)
produce_out.extend(bp_outa.getA().tolist())
_A : int = [list(map(self.do_round , __lowerCamelCase)) for each in produce_out]
return np.asarray(__lowerCamelCase)
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
# return the image data after the convolution process so it can be inspected
_A : Optional[int] = np.asmatrix(__lowerCamelCase)
_A , _A : Tuple = self.convolute(
__lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_A : Union[str, Any] = self.pooling(__lowerCamelCase , self.size_poolinga)
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| style_context_codestyle: 11 | label: 1 |
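The `style_context` cell above is a from-scratch numpy CNN whose forward pass slides a kernel over the image, applies a sigmoid, and average- or max-pools the feature map. A compact sketch of just the convolution-plus-pooling forward step (no training loop), assuming 'valid' convolution with square kernels:

```python
import numpy as np

def sigmoid(x: np.ndarray) -> np.ndarray:
    return 1.0 / (1.0 + np.exp(-x))

def conv2d_valid(image: np.ndarray, kernel: np.ndarray, bias: float = 0.0, stride: int = 1) -> np.ndarray:
    """'Valid' 2-D convolution followed by a sigmoid activation."""
    k = kernel.shape[0]
    out_size = (image.shape[0] - k) // stride + 1
    out = np.empty((out_size, out_size))
    for i in range(out_size):
        for j in range(out_size):
            patch = image[i * stride : i * stride + k, j * stride : j * stride + k]
            out[i, j] = np.sum(patch * kernel) - bias
    return sigmoid(out)

def avg_pool(feature_map: np.ndarray, size: int) -> np.ndarray:
    """Non-overlapping average pooling (assumes the map size is divisible by `size`)."""
    h, w = feature_map.shape
    return feature_map.reshape(h // size, size, w // size, size).mean(axis=(1, 3))

image = np.random.rand(8, 8)
fmap = conv2d_valid(image, np.random.rand(3, 3), stride=1)  # -> (6, 6)
print(avg_pool(fmap, 2).shape)                              # (3, 3)
```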
def _UpperCAmelCase (UpperCamelCase__ : int ):
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise ValueError("Input must be an integer" )
if input_num <= 0:
raise ValueError("Input must be positive" )
return sum(
divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| code_codestyle: 11 |
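The `code` cell above sums the proper divisors of n by trial division up to n // 2, which is O(n). Since divisors come in pairs (d, n // d), the same sum can be computed in O(sqrt(n)); a sketch:

```python
def sum_of_proper_divisors(n: int) -> int:
    """Sum of proper divisors of n, pairing d with n // d up to sqrt(n)."""
    if not isinstance(n, int):
        raise ValueError("Input must be an integer")
    if n <= 0:
        raise ValueError("Input must be positive")
    total = 1 if n > 1 else 0   # 1 divides everything (but 1 itself has no proper divisors)
    d = 2
    while d * d <= n:
        if n % d == 0:
            total += d
            if d != n // d:     # avoid double-counting a square-root divisor
                total += n // d
        d += 1
    return total

print(sum_of_proper_divisors(28))  # 28, a perfect number: 1 + 2 + 4 + 7 + 14
```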
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
lowerCAmelCase__ = object()
# For specifying empty leaf dict `{}`
lowerCAmelCase__ = object()
def _UpperCAmelCase (UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] ):
_A : str = tuple((re.compile(x + "$" ) for x in qs) )
for i in range(len(UpperCamelCase__ ) - len(UpperCamelCase__ ) + 1 ):
_A : Tuple = [x.match(UpperCamelCase__ ) for x, y in zip(UpperCamelCase__ , ks[i:] )]
if matches and all(UpperCamelCase__ ):
return True
return False
def _UpperCAmelCase (UpperCamelCase__ : str ):
def replace(UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int] ):
for rule, replacement in rules:
if _match(UpperCamelCase__ , UpperCamelCase__ ):
return replacement
return val
return replace
def _UpperCAmelCase ():
return [
# embeddings
(("transformer", "wpe", "embedding"), P("mp" , UpperCamelCase__ )),
(("transformer", "wte", "embedding"), P("mp" , UpperCamelCase__ )),
# attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(UpperCamelCase__ , "mp" )),
(("attention", "out_proj", "kernel"), P("mp" , UpperCamelCase__ )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(UpperCamelCase__ , "mp" )),
(("mlp", "c_fc", "bias"), P("mp" )),
(("mlp", "c_proj", "kernel"), P("mp" , UpperCamelCase__ )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def _UpperCAmelCase (UpperCamelCase__ : List[str] ):
_A : int = _get_partition_rules()
_A : Optional[int] = _replacement_rules(UpperCamelCase__ )
_A : Optional[int] = {k: _unmatched for k in flatten_dict(UpperCamelCase__ )}
_A : List[str] = {k: replace(UpperCamelCase__ , UpperCamelCase__ ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(UpperCamelCase__ ) )
| style_context_codestyle: 11 | label: 1 |
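The `style_context` cell above assigns JAX `PartitionSpec`s to model parameters by matching a tuple of regex patterns against a contiguous window of each flattened parameter path. A self-contained sketch of that matching helper, with placeholder spec strings instead of real `PartitionSpec` objects:

```python
import re
from typing import Tuple

def match_window(patterns: Tuple[str, ...], key_path: Tuple[str, ...]) -> bool:
    """True if `patterns` fully matches some contiguous window of `key_path`."""
    regexes = tuple(re.compile(p + "$") for p in patterns)  # "$" forces full-component matches
    for i in range(len(key_path) - len(regexes) + 1):
        window = key_path[i : i + len(regexes)]
        if all(r.match(k) for r, k in zip(regexes, window)):
            return True
    return False

rules = [
    (("attention", "out_proj", "kernel"), "shard-on-mp"),
    ((r"ln_\d+", "bias"), "replicate"),
]

def spec_for(key_path: Tuple[str, ...]) -> str:
    for patterns, spec in rules:
        if match_window(patterns, key_path):
            return spec
    return "unmatched"

print(spec_for(("transformer", "h", "0", "attention", "out_proj", "kernel")))  # shard-on-mp
print(spec_for(("transformer", "h", "3", "ln_1", "bias")))                     # replicate
```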
def _UpperCAmelCase (UpperCamelCase__ : int = 2000000 ):
_A : Tuple = [0 for i in range(n + 1 )]
_A : Union[str, Any] = 1
_A : Optional[Any] = 1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , UpperCamelCase__ ):
_A : Union[str, Any] = 1
_A : Union[str, Any] = 0
for i in range(UpperCamelCase__ ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(f"{solution() = }")
| code_codestyle: 11 |
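The `code` cell above is a Sieve of Eratosthenes summing the primes below two million (Project Euler 10); note that it uses 0 to mark primes and 1 to mark composites, an inversion the obfuscation makes easy to misread. A version with the conventional boolean orientation:

```python
def sum_of_primes_below(n: int) -> int:
    """Sieve of Eratosthenes: sum all primes strictly below n (n >= 2)."""
    is_prime = [True] * n
    is_prime[0] = is_prime[1] = False
    for i in range(2, int(n**0.5) + 1):
        if is_prime[i]:
            for j in range(i * i, n, i):   # multiples of i, starting at i*i
                is_prime[j] = False
    return sum(i for i in range(n) if is_prime[i])

print(sum_of_primes_below(10))         # 2 + 3 + 5 + 7 = 17
print(sum_of_primes_below(2_000_000))  # 142913828922 (Project Euler 10)
```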
def _UpperCAmelCase (UpperCamelCase__ : str , UpperCamelCase__ : bool = False ):
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
_A : Optional[Any] = f"Expected string as input, found {type(UpperCamelCase__ )}"
raise ValueError(UpperCamelCase__ )
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
_A : Union[str, Any] = f"Expected boolean as use_pascal parameter, found {type(UpperCamelCase__ )}"
raise ValueError(UpperCamelCase__ )
_A : int = input_str.split("_" )
_A : str = 0 if use_pascal else 1
_A : str = words[start_index:]
_A : Optional[Any] = [word[0].upper() + word[1:] for word in words_to_capitalize]
_A : Any = "" if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| style_context_codestyle: 11 | label: 1 |
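The `style_context` cell above converts snake_case to camelCase or PascalCase. A cleaned-up sketch, with an added guard so empty segments from doubled underscores do not raise an IndexError (the original would):

```python
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """Convert snake_case to camelCase, or to PascalCase when use_pascal=True."""
    if not isinstance(input_str, str):
        raise ValueError(f"Expected string as input, found {type(input_str)}")
    if not isinstance(use_pascal, bool):
        raise ValueError(f"Expected boolean as use_pascal parameter, found {type(use_pascal)}")
    words = input_str.split("_")
    start = 0 if use_pascal else 1           # PascalCase capitalizes the first word too
    capitalized = [word[0].upper() + word[1:] for word in words[start:] if word]
    head = "" if use_pascal else words[0]
    return head + "".join(capitalized)

print(snake_to_camel_case("some_random_string"))                   # someRandomString
print(snake_to_camel_case("some_random_string", use_pascal=True))  # SomeRandomString
```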
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCAmelCase__ = {
'configuration_rag': ['RagConfig'],
'retrieval_rag': ['RagRetriever'],
'tokenization_rag': ['RagTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'RagModel',
'RagPreTrainedModel',
'RagSequenceForGeneration',
'RagTokenForGeneration',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'TFRagModel',
'TFRagPreTrainedModel',
'TFRagSequenceForGeneration',
'TFRagTokenForGeneration',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| code_codestyle: 11 |
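The `code` cell above is a package `__init__` that registers class names in an `_import_structure` dict and hands it to `_LazyModule`, so heavy torch/tensorflow imports only run when a name is actually accessed. A tiny sketch of the underlying mechanism using a module-level `__getattr__` (PEP 562); the submodule and class names are placeholders, and this is not the actual `_LazyModule` implementation:

```python
# lazy_pkg/__init__.py -- defer importing heavy submodules until first attribute access
import importlib
from typing import Any

_import_structure = {
    "configuration_rag": ["RagConfig"],
    "tokenization_rag": ["RagTokenizer"],
}
# reverse map: public name -> submodule that defines it
_name_to_module = {name: mod for mod, names in _import_structure.items() for name in names}

def __getattr__(name: str) -> Any:
    if name in _name_to_module:
        module = importlib.import_module("." + _name_to_module[name], __name__)
        value = getattr(module, name)
        globals()[name] = value   # cache so the import runs only once
        return value
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```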
from __future__ import annotations
def _UpperCAmelCase (UpperCamelCase__ : list[int] , UpperCamelCase__ : list[int] , UpperCamelCase__ : int ):
_A : Dict = list(range(len(UpperCamelCase__ ) ) )
_A : Any = [v / w for v, w in zip(UpperCamelCase__ , UpperCamelCase__ )]
index.sort(key=lambda UpperCamelCase__ : ratio[i] , reverse=UpperCamelCase__ )
_A : float = 0
_A : list[float] = [0] * len(UpperCamelCase__ )
for i in index:
if weight[i] <= capacity:
_A : Union[str, Any] = 1
max_value += value[i]
capacity -= weight[i]
else:
_A : Optional[Any] = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
| style_context_codestyle: 11 | label: 1 |
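The `style_context` cell above is the greedy fractional knapsack: sort items by value-to-weight ratio, take whole items while they fit, then take a fraction of the next item and stop. A clean reconstruction:

```python
def fractional_knapsack(values: list, weights: list, capacity: float):
    """Greedy fractional knapsack: returns (max_value, fraction taken of each item)."""
    ratio = [v / w for v, w in zip(values, weights)]
    order = sorted(range(len(values)), key=lambda i: ratio[i], reverse=True)
    fractions = [0.0] * len(values)
    max_value = 0.0
    for i in order:
        if weights[i] <= capacity:
            fractions[i] = 1.0                    # take the whole item
            max_value += values[i]
            capacity -= weights[i]
        else:
            fractions[i] = capacity / weights[i]  # take the fitting fraction, then stop
            max_value += values[i] * fractions[i]
            break
    return max_value, fractions

print(fractional_knapsack([60, 100, 120], [10, 20, 30], 50))  # (240.0, [1.0, 1.0, 0.666...])
```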
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase__ = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| code_codestyle: 11 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( a):
'''simple docstring'''
def __init__( self , *__lowerCamelCase , **__lowerCamelCase) -> None:
warnings.warn(
"The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use BeitImageProcessor instead." , __lowerCamelCase , )
super().__init__(*__lowerCamelCase , **__lowerCamelCase)
| style_context_codestyle: 11 | label: 1 |
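The `style_context` cell above keeps a deprecated class name alive as a thin subclass that warns and then delegates to the replacement. A generic sketch of that deprecation shim; the stand-in base class and its constructor are illustrative, not the transformers API:

```python
import warnings

class BeitImageProcessor:
    """Stand-in for the replacement class (illustrative)."""
    def __init__(self, **kwargs):
        self.config = kwargs

class BeitFeatureExtractor(BeitImageProcessor):
    """Deprecated alias kept for backward compatibility."""
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in"
            " version 5 of Transformers. Please use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

processor = BeitFeatureExtractor(do_resize=True)  # warns once, then behaves like the new class
```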
def _UpperCAmelCase ():
_A : Optional[int] = 0
for i in range(1 , 1001 ):
total += i**i
return str(UpperCamelCase__ )[-10:]
if __name__ == "__main__":
print(solution())
| code_codestyle: 11 |
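The `code` cell above solves Project Euler 48 by summing i**i in full precision and slicing the last ten digits of the string. Because only the last ten digits matter, the same answer falls out of modular exponentiation with Python's three-argument pow, which keeps every intermediate value small:

```python
def self_powers_last_digits(limit: int = 1000, digits: int = 10) -> str:
    """Last `digits` digits of 1^1 + 2^2 + ... + limit^limit via modular pow."""
    mod = 10 ** digits
    total = sum(pow(i, i, mod) for i in range(1, limit + 1)) % mod
    return str(total).zfill(digits)   # zfill preserves any leading zeros

print(self_powers_last_digits())  # 9110846700
```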
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=3_2 , __lowerCamelCase=2 , __lowerCamelCase=3 , __lowerCamelCase=1_6 , __lowerCamelCase=[1, 2, 1] , __lowerCamelCase=[2, 2, 4] , __lowerCamelCase=2 , __lowerCamelCase=2.0 , __lowerCamelCase=True , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.1 , __lowerCamelCase="gelu" , __lowerCamelCase=False , __lowerCamelCase=True , __lowerCamelCase=0.0_2 , __lowerCamelCase=1e-5 , __lowerCamelCase=True , __lowerCamelCase=None , __lowerCamelCase=True , __lowerCamelCase=1_0 , __lowerCamelCase=8 , __lowerCamelCase=["stage1", "stage2", "stage3"] , __lowerCamelCase=[1, 2, 3] , ) -> Optional[Any]:
_A : int = parent
_A : Optional[Any] = batch_size
_A : str = image_size
_A : Tuple = patch_size
_A : Tuple = num_channels
_A : Optional[int] = embed_dim
_A : Dict = depths
_A : Any = num_heads
_A : Any = window_size
_A : int = mlp_ratio
_A : Any = qkv_bias
_A : Union[str, Any] = hidden_dropout_prob
_A : Optional[Any] = attention_probs_dropout_prob
_A : Dict = drop_path_rate
_A : List[Any] = hidden_act
_A : Any = use_absolute_embeddings
_A : Optional[int] = patch_norm
_A : Tuple = layer_norm_eps
_A : List[str] = initializer_range
_A : Optional[int] = is_training
_A : Optional[Any] = scope
_A : Optional[int] = use_labels
_A : Dict = type_sequence_label_size
_A : str = encoder_stride
_A : Optional[int] = out_features
_A : Optional[int] = out_indices
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_A : Optional[Any] = None
if self.use_labels:
_A : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_A : Optional[int] = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self) -> Union[str, Any]:
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> List[Any]:
_A : Dict = MaskFormerSwinModel(config=__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
_A : int = model(__lowerCamelCase)
_A : Any = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
_A : List[str] = int(config.embed_dim * 2 ** (len(config.depths) - 1))
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim))
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Dict:
_A : Optional[Any] = MaskFormerSwinBackbone(config=__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
_A : Dict = model(__lowerCamelCase)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [1_3, 1_6, 1_6, 1_6])
# verify channels
self.parent.assertEqual(len(model.channels) , len(config.out_features))
self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4])
# verify ValueError
with self.parent.assertRaises(__lowerCamelCase):
_A : Union[str, Any] = ["stem"]
_A : Union[str, Any] = MaskFormerSwinBackbone(config=__lowerCamelCase)
def _lowerCamelCase ( self) -> Dict:
_A : Any = self.prepare_config_and_inputs()
_A , _A , _A : List[Any] = config_and_inputs
_A : Optional[int] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( a , a , unittest.TestCase):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def _lowerCamelCase ( self) -> str:
_A : Union[str, Any] = MaskFormerSwinModelTester(self)
_A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , embed_dim=3_7)
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
" `nn.DataParallel`"
))
def _lowerCamelCase ( self) -> Union[str, Any]:
pass
def _lowerCamelCase ( self) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self) -> str:
return
def _lowerCamelCase ( self) -> List[Any]:
_A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase)
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__lowerCamelCase)
@unittest.skip("Swin does not use inputs_embeds")
def _lowerCamelCase ( self) -> str:
pass
@unittest.skip("Swin does not support feedforward chunking")
def _lowerCamelCase ( self) -> List[Any]:
pass
def _lowerCamelCase ( self) -> Optional[int]:
_A , _A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Union[str, Any] = model_class(__lowerCamelCase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
_A : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear))
def _lowerCamelCase ( self) -> Any:
_A , _A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : int = model_class(__lowerCamelCase)
_A : Optional[int] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : int = [*signature.parameters.keys()]
_A : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase)
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
def _lowerCamelCase ( self) -> Tuple:
pass
@unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
def _lowerCamelCase ( self) -> str:
pass
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Optional[int]:
_A : Any = model_class(__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
with torch.no_grad():
_A : str = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase))
_A : Tuple = outputs.hidden_states
_A : Any = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths) + 1)
self.assertEqual(len(__lowerCamelCase) , __lowerCamelCase)
# Swin has a different seq_length
_A : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
_A : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
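        # e.g. a 32x32 input with a 4x4 patch grid gives (32 // 4) * (32 // 4) = 64 patches
        # (illustrative numbers; the real values come from the tester config)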
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
def _lowerCamelCase ( self) -> Dict:
_A , _A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_A : Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_A : List[Any] = True
self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
            # check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
_A : Optional[int] = True
self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
def _lowerCamelCase ( self) -> Tuple:
_A , _A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_A : Optional[int] = 3
_A : Dict = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
_A : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
_A : int = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_A : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
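        # rounds each spatial dim up to the next multiple of patch_size (note the formula
        # adds one full patch even when the size is already divisible)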
for model_class in self.all_model_classes:
_A : List[Any] = True
self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , (padded_height, padded_width))
            # check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
_A : Union[str, Any] = True
self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , (padded_height, padded_width))
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
def _lowerCamelCase ( self) -> List[str]:
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
def _lowerCamelCase ( self) -> List[str]:
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
def _lowerCamelCase ( self) -> str:
pass
def _lowerCamelCase ( self) -> Optional[Any]:
_A , _A : Any = self.model_tester.prepare_config_and_inputs_for_common()
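        # The nested helpers below assert that tuple- and dict-style model outputs agree
        # elementwise; NaNs are zeroed first so that NaN != NaN does not break torch.allclose.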
def set_nan_tensor_to_zero(__lowerCamelCase):
_A : Optional[int] = 0
return t
def check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase={}):
with torch.no_grad():
_A : Any = model(**__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase)
_A : int = model(**__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase).to_tuple()
def recursive_check(__lowerCamelCase , __lowerCamelCase):
if isinstance(__lowerCamelCase , (List, Tuple)):
for tuple_iterable_value, dict_iterable_value in zip(__lowerCamelCase , __lowerCamelCase):
recursive_check(__lowerCamelCase , __lowerCamelCase)
elif isinstance(__lowerCamelCase , __lowerCamelCase):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values()):
recursive_check(__lowerCamelCase , __lowerCamelCase)
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(__lowerCamelCase) , set_nan_tensor_to_zero(__lowerCamelCase) , atol=1e-5) , msg=(
"Tuple and dict output are not equal. Difference:"
F" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
F" {torch.isnan(__lowerCamelCase).any()} and `inf`: {torch.isinf(__lowerCamelCase)}. Dict has"
F" `nan`: {torch.isnan(__lowerCamelCase).any()} and `inf`: {torch.isinf(__lowerCamelCase)}."
) , )
recursive_check(__lowerCamelCase , __lowerCamelCase)
for model_class in self.all_model_classes:
_A : List[Any] = model_class(__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
_A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)
_A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
_A : Any = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase)
_A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase)
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
_A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)
_A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True})
_A : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase)
_A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase)
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True})
@require_torch
class lowerCAmelCase__ ( unittest.TestCase , a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (MaskFormerSwinBackbone,) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE = MaskFormerSwinConfig
def _lowerCamelCase ( self) -> Optional[Any]:
_A : Tuple = MaskFormerSwinModelTester(self)
def _lowerCamelCase ( self) -> Optional[Any]:
_A , _A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_A : Union[str, Any] = inputs_dict["pixel_values"].shape[0]
for backbone_class in self.all_model_classes:
_A : Optional[Any] = backbone_class(__lowerCamelCase)
backbone.to(__lowerCamelCase)
backbone.eval()
_A : List[Any] = backbone(**__lowerCamelCase)
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , __lowerCamelCase)
self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels):
                self.assertEqual(feature_map.shape[:2] , (batch_size, n_channels))
self.assertIsNone(outputs.hidden_states)
self.assertIsNone(outputs.attentions)
# Test output_hidden_states=True
_A : List[str] = backbone(**__lowerCamelCase , output_hidden_states=__lowerCamelCase)
self.assertIsNotNone(outputs.hidden_states)
            self.assertEqual(len(outputs.hidden_states) , len(backbone.stage_names))
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
_A , _A , _A : List[str] = hidden_state.shape
                    self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels))
# Test output_attentions=True
if self.has_attentions:
_A : int = backbone(**__lowerCamelCase , output_attentions=__lowerCamelCase)
self.assertIsNotNone(outputs.attentions)
| 11 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'facebook/xlm-roberta-xl': 'https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json',
'facebook/xlm-roberta-xxl': 'https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = "xlm-roberta-xl"
def __init__( self , __lowerCamelCase=2_5_0_8_8_0 , __lowerCamelCase=2_5_6_0 , __lowerCamelCase=3_6 , __lowerCamelCase=3_2 , __lowerCamelCase=1_0_2_4_0 , __lowerCamelCase="gelu" , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=5_1_4 , __lowerCamelCase=1 , __lowerCamelCase=0.0_2 , __lowerCamelCase=1e-05 , __lowerCamelCase=1 , __lowerCamelCase=0 , __lowerCamelCase=2 , __lowerCamelCase="absolute" , __lowerCamelCase=True , __lowerCamelCase=None , **__lowerCamelCase , ) -> List[Any]:
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase)
_A : Any = vocab_size
_A : Dict = hidden_size
_A : str = num_hidden_layers
_A : Optional[int] = num_attention_heads
_A : Any = hidden_act
_A : Any = intermediate_size
_A : Union[str, Any] = hidden_dropout_prob
_A : List[str] = attention_probs_dropout_prob
_A : Optional[int] = max_position_embeddings
_A : Dict = type_vocab_size
_A : Optional[Any] = initializer_range
_A : int = layer_norm_eps
_A : List[Any] = position_embedding_type
_A : Tuple = use_cache
_A : Union[str, Any] = classifier_dropout
class lowerCAmelCase__ ( a):
'''simple docstring'''
@property
def _lowerCamelCase ( self) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_A : Dict = {0: "batch", 1: "choice", 2: "sequence"}
else:
_A : Any = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
])
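        # Sketch: for the default task this returns
        # OrderedDict([("input_ids", {0: "batch", 1: "sequence"}), ("attention_mask", {0: "batch", 1: "sequence"})]),
        # which the ONNX exporter uses to mark those axes as dynamic dimensions.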
| 11 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
lowerCAmelCase__ = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['SpeechT5Tokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 11 | 1 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCAmelCase__ ( a , unittest.TestCase):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ProphetNetTokenizer
__SCREAMING_SNAKE_CASE = False
def _lowerCamelCase ( self) -> Union[str, Any]:
super().setUp()
_A : List[str] = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_A : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def _lowerCamelCase ( self , __lowerCamelCase) -> Optional[Any]:
_A : Optional[Any] = "UNwant\u00E9d,running"
_A : Optional[Any] = "unwanted, running"
return input_text, output_text
def _lowerCamelCase ( self) -> List[Any]:
_A : Optional[int] = self.tokenizer_class(self.vocab_file)
_A : Optional[int] = tokenizer.tokenize("UNwant\u00E9d,running")
self.assertListEqual(__lowerCamelCase , ["un", "##want", "##ed", ",", "runn", "##ing"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase) , [9, 6, 7, 1_2, 1_0, 1_1])
def _lowerCamelCase ( self) -> Any:
_A : Tuple = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz") , ["ah", "\u535A", "\u63A8", "zz"])
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : Tuple = BasicTokenizer(do_lower_case=__lowerCamelCase)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? ") , ["hello", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["hello"])
def _lowerCamelCase ( self) -> int:
_A : Dict = BasicTokenizer(do_lower_case=__lowerCamelCase , strip_accents=__lowerCamelCase)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["hällo", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["h\u00E9llo"])
def _lowerCamelCase ( self) -> str:
_A : int = BasicTokenizer(do_lower_case=__lowerCamelCase , strip_accents=__lowerCamelCase)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["hallo", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["hello"])
def _lowerCamelCase ( self) -> Dict:
_A : Tuple = BasicTokenizer(do_lower_case=__lowerCamelCase)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["hallo", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["hello"])
def _lowerCamelCase ( self) -> Dict:
_A : List[Any] = BasicTokenizer(do_lower_case=__lowerCamelCase)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? ") , ["HeLLo", "!", "how", "Are", "yoU", "?"])
def _lowerCamelCase ( self) -> Any:
_A : Optional[int] = BasicTokenizer(do_lower_case=__lowerCamelCase , strip_accents=__lowerCamelCase)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["HäLLo", "!", "how", "Are", "yoU", "?"])
def _lowerCamelCase ( self) -> int:
_A : List[str] = BasicTokenizer(do_lower_case=__lowerCamelCase , strip_accents=__lowerCamelCase)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["HaLLo", "!", "how", "Are", "yoU", "?"])
def _lowerCamelCase ( self) -> Any:
_A : List[Any] = BasicTokenizer(do_lower_case=__lowerCamelCase , never_split=["[UNK]"])
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]") , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])
def _lowerCamelCase ( self) -> str:
_A : int = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
_A : List[Any] = {}
for i, token in enumerate(__lowerCamelCase):
_A : Tuple = i
_A : Dict = WordpieceTokenizer(vocab=__lowerCamelCase , unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize("") , [])
self.assertListEqual(tokenizer.tokenize("unwanted running") , ["un", "##want", "##ed", "runn", "##ing"])
self.assertListEqual(tokenizer.tokenize("unwantedX running") , ["[UNK]", "runn", "##ing"])
@require_torch
def _lowerCamelCase ( self) -> Optional[Any]:
_A : Tuple = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
_A : int = ["A long paragraph for summarization.", "Another paragraph for summarization."]
_A : int = [1_0_3_7, 2_1_4_6, 2_0_4_2_3, 2_0_0_5, 7_6_8_0, 7_8_4_9, 3_9_8_9, 1_0_1_2, 1_0_2]
_A : Optional[Any] = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt")
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase)
_A : Dict = list(batch.input_ids.numpy()[0])
self.assertListEqual(__lowerCamelCase , __lowerCamelCase)
self.assertEqual((2, 9) , batch.input_ids.shape)
self.assertEqual((2, 9) , batch.attention_mask.shape)
def _lowerCamelCase ( self) -> Optional[Any]:
self.assertTrue(_is_whitespace(" "))
self.assertTrue(_is_whitespace("\t"))
self.assertTrue(_is_whitespace("\r"))
self.assertTrue(_is_whitespace("\n"))
self.assertTrue(_is_whitespace("\u00A0"))
self.assertFalse(_is_whitespace("A"))
self.assertFalse(_is_whitespace("-"))
def _lowerCamelCase ( self) -> Optional[Any]:
self.assertTrue(_is_control("\u0005"))
self.assertFalse(_is_control("A"))
self.assertFalse(_is_control(" "))
self.assertFalse(_is_control("\t"))
self.assertFalse(_is_control("\r"))
def _lowerCamelCase ( self) -> Dict:
self.assertTrue(_is_punctuation("-"))
self.assertTrue(_is_punctuation("$"))
self.assertTrue(_is_punctuation("`"))
self.assertTrue(_is_punctuation("."))
self.assertFalse(_is_punctuation("A"))
self.assertFalse(_is_punctuation(" "))
@slow
def _lowerCamelCase ( self) -> Tuple:
_A : List[Any] = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
_A : str = tokenizer.encode("sequence builders" , add_special_tokens=__lowerCamelCase)
_A : str = tokenizer.encode("multi-sequence build" , add_special_tokens=__lowerCamelCase)
_A : List[str] = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase)
_A : Dict = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase)
assert encoded_sentence == text + [1_0_2]
assert encoded_pair == text + [1_0_2] + text_a + [1_0_2]
| 11 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def _UpperCAmelCase (UpperCamelCase__ : Union[str, Any] ):
_A , _A : Any = image.size
_A , _A : str = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
_A : List[str] = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] )
_A : Any = np.array(UpperCamelCase__ ).astype(np.floataa ) / 2_55.0
_A : Optional[Any] = image[None].transpose(0 , 3 , 1 , 2 )
_A : Union[str, Any] = torch.from_numpy(UpperCamelCase__ )
return 2.0 * image - 1.0
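# Net effect of the preprocessing helper above: a PIL image becomes a float tensor of shape
# (1, 3, H, W) with values in [-1, 1], where H and W are rounded down to multiples of 32.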
class lowerCAmelCase__ ( a):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> Optional[int]:
super().__init__()
self.register_modules(vqvae=__lowerCamelCase , unet=__lowerCamelCase , scheduler=__lowerCamelCase)
@torch.no_grad()
def __call__( self , __lowerCamelCase = None , __lowerCamelCase = 1 , __lowerCamelCase = 1_0_0 , __lowerCamelCase = 0.0 , __lowerCamelCase = None , __lowerCamelCase = "pil" , __lowerCamelCase = True , ) -> Union[Tuple, ImagePipelineOutput]:
if isinstance(__lowerCamelCase , PIL.Image.Image):
_A : Tuple = 1
elif isinstance(__lowerCamelCase , torch.Tensor):
_A : Union[str, Any] = image.shape[0]
else:
raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(__lowerCamelCase)}")
if isinstance(__lowerCamelCase , PIL.Image.Image):
_A : Union[str, Any] = preprocess(__lowerCamelCase)
_A , _A : Union[str, Any] = image.shape[-2:]
        # in_channels should be 6: 3 for the latents, 3 for the low-resolution image
_A : Optional[Any] = (batch_size, self.unet.config.in_channels // 2, height, width)
_A : str = next(self.unet.parameters()).dtype
_A : Union[str, Any] = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=self.device , dtype=__lowerCamelCase)
_A : List[Any] = image.to(device=self.device , dtype=__lowerCamelCase)
# set timesteps and move to the correct device
self.scheduler.set_timesteps(__lowerCamelCase , device=self.device)
_A : Any = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
_A : List[str] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler; it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_A : str = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
_A : Optional[int] = {}
if accepts_eta:
_A : List[Any] = eta
for t in self.progress_bar(__lowerCamelCase):
            # concatenate the latents and the low-resolution image along the channel dimension.
_A : List[Any] = torch.cat([latents, image] , dim=1)
_A : str = self.scheduler.scale_model_input(__lowerCamelCase , __lowerCamelCase)
# predict the noise residual
_A : Any = self.unet(__lowerCamelCase , __lowerCamelCase).sample
# compute the previous noisy sample x_t -> x_t-1
_A : Optional[int] = self.scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase).prev_sample
# decode the image latents with the VQVAE
_A : Union[str, Any] = self.vqvae.decode(__lowerCamelCase).sample
_A : Dict = torch.clamp(__lowerCamelCase , -1.0 , 1.0)
_A : Tuple = image / 2 + 0.5
_A : int = image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
_A : Optional[int] = self.numpy_to_pil(__lowerCamelCase)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowerCamelCase)
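# Minimal usage sketch (assuming this is diffusers' LDM super-resolution pipeline; the
# checkpoint id and variable names below are illustrative, not guaranteed):
#
#   pipeline = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#   upscaled = pipeline(image=low_res_pil_image, num_inference_steps=100, eta=1.0).images[0]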
| 11 | 1 |
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class lowerCAmelCase__ ( a , unittest.TestCase):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = MvpTokenizer
__SCREAMING_SNAKE_CASE = MvpTokenizerFast
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = filter_roberta_detectors
def _lowerCamelCase ( self) -> str:
super().setUp()
_A : Any = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
_A : Tuple = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase))))
_A : Any = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_A : Tuple = {"unk_token": "<unk>"}
_A : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
_A : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as fp:
fp.write(json.dumps(__lowerCamelCase) + "\n")
with open(self.merges_file , "w" , encoding="utf-8") as fp:
fp.write("\n".join(__lowerCamelCase))
def _lowerCamelCase ( self , **__lowerCamelCase) -> int:
kwargs.update(self.special_tokens_map)
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase)
def _lowerCamelCase ( self , **__lowerCamelCase) -> Any:
kwargs.update(self.special_tokens_map)
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase)
def _lowerCamelCase ( self , __lowerCamelCase) -> str:
return "lower newer", "lower newer"
@cached_property
def _lowerCamelCase ( self) -> Tuple:
return MvpTokenizer.from_pretrained("RUCAIBox/mvp")
@cached_property
def _lowerCamelCase ( self) -> Optional[int]:
return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
@require_torch
def _lowerCamelCase ( self) -> Optional[Any]:
_A : int = ["A long paragraph for summarization.", "Another paragraph for summarization."]
_A : Any = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_A : str = tokenizer(__lowerCamelCase , max_length=len(__lowerCamelCase) , padding=__lowerCamelCase , return_tensors="pt")
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase)
self.assertEqual((2, 9) , batch.input_ids.shape)
self.assertEqual((2, 9) , batch.attention_mask.shape)
_A : Optional[int] = batch.input_ids.tolist()[0]
self.assertListEqual(__lowerCamelCase , __lowerCamelCase)
# Test that special tokens are reset
@require_torch
def _lowerCamelCase ( self) -> Any:
_A : Any = ["A long paragraph for summarization.", "Another paragraph for summarization."]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_A : Any = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt")
# check if input_ids are returned and no labels
self.assertIn("input_ids" , __lowerCamelCase)
self.assertIn("attention_mask" , __lowerCamelCase)
self.assertNotIn("labels" , __lowerCamelCase)
self.assertNotIn("decoder_attention_mask" , __lowerCamelCase)
@require_torch
def _lowerCamelCase ( self) -> List[str]:
_A : Optional[Any] = [
"Summary of the text.",
"Another summary.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_A : Tuple = tokenizer(text_target=__lowerCamelCase , max_length=3_2 , padding="max_length" , return_tensors="pt")
self.assertEqual(3_2 , targets["input_ids"].shape[1])
@require_torch
def _lowerCamelCase ( self) -> str:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_A : str = tokenizer(
["I am a small frog" * 1_0_2_4, "I am a small frog"] , padding=__lowerCamelCase , truncation=__lowerCamelCase , return_tensors="pt")
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase)
self.assertEqual(batch.input_ids.shape , (2, 1_0_2_4))
@require_torch
def _lowerCamelCase ( self) -> Tuple:
_A : Union[str, Any] = ["A long paragraph for summarization."]
_A : Dict = [
"Summary of the text.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_A : Dict = tokenizer(__lowerCamelCase , text_target=__lowerCamelCase , return_tensors="pt")
_A : Dict = inputs["input_ids"]
_A : List[str] = inputs["labels"]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
def _lowerCamelCase ( self) -> Union[str, Any]:
pass
def _lowerCamelCase ( self) -> List[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"):
_A : Tuple = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase)
_A : Union[str, Any] = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase)
_A : Dict = "A, <mask> AllenNLP sentence."
_A : List[str] = tokenizer_r.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase)
_A : List[Any] = tokenizer_p.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase)
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"]) , sum(tokens_p["token_type_ids"]))
                # attention_mask should be 1 everywhere, so its average over the length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]) , sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]) , )
_A : Optional[int] = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
_A : str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2])
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2])
self.assertSequenceEqual(
__lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"])
self.assertSequenceEqual(
__lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"])
| 11 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( a , a , unittest.TestCase):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VQModel
__SCREAMING_SNAKE_CASE = "sample"
@property
def _lowerCamelCase ( self , __lowerCamelCase=(3_2, 3_2)) -> Optional[Any]:
_A : Optional[int] = 4
_A : Tuple = 3
_A : List[Any] = floats_tensor((batch_size, num_channels) + sizes).to(__lowerCamelCase)
return {"sample": image}
@property
def _lowerCamelCase ( self) -> int:
return (3, 3_2, 3_2)
@property
def _lowerCamelCase ( self) -> List[Any]:
return (3, 3_2, 3_2)
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : List[Any] = {
"block_out_channels": [3_2, 6_4],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 3,
}
_A : int = self.dummy_input
return init_dict, inputs_dict
def _lowerCamelCase ( self) -> Union[str, Any]:
pass
def _lowerCamelCase ( self) -> Any:
pass
def _lowerCamelCase ( self) -> Any:
_A , _A : List[Any] = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=__lowerCamelCase)
self.assertIsNotNone(__lowerCamelCase)
self.assertEqual(len(loading_info["missing_keys"]) , 0)
model.to(__lowerCamelCase)
_A : str = model(**self.dummy_input)
assert image is not None, "Make sure output is not None"
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : Optional[Any] = VQModel.from_pretrained("fusing/vqgan-dummy")
model.to(__lowerCamelCase).eval()
torch.manual_seed(0)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0)
_A : Tuple = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size)
_A : Optional[int] = image.to(__lowerCamelCase)
with torch.no_grad():
_A : List[str] = model(__lowerCamelCase).sample
_A : int = output[0, -1, -3:, -3:].flatten().cpu()
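        # the slice above takes the bottom-right 3x3 patch of the last channel - a compact
        # fingerprint of the full output tensor used for the regression check below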
# fmt: off
_A : Optional[Any] = torch.tensor([-0.0_1_5_3, -0.4_0_4_4, -0.1_8_8_0, -0.5_1_6_1, -0.2_4_1_8, -0.4_0_7_2, -0.1_6_1_2, -0.0_6_3_3, -0.0_1_4_3])
# fmt: on
self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3))
| 11 | 1 |
lowerCAmelCase__ = '0.18.2'
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 11 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase__ = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
},
}
lowerCAmelCase__ = {
'facebook/mbart-large-en-ro': 10_24,
'facebook/mbart-large-cc25': 10_24,
}
# fmt: off
lowerCAmelCase__ = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
# fmt: on
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
__SCREAMING_SNAKE_CASE = MBartTokenizer
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="<s>" , __lowerCamelCase="</s>" , __lowerCamelCase="</s>" , __lowerCamelCase="<s>" , __lowerCamelCase="<unk>" , __lowerCamelCase="<pad>" , __lowerCamelCase="<mask>" , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , **__lowerCamelCase , ) -> Optional[int]:
        # The mask token behaves like a normal word, i.e. it includes the space before it
_A : List[str] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase) else mask_token
super().__init__(
vocab_file=__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , **__lowerCamelCase , )
_A : Union[str, Any] = vocab_file
_A : int = False if not self.vocab_file else True
_A : Optional[int] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens])
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
_A : Union[str, Any] = {
lang_code: self.convert_tokens_to_ids(__lowerCamelCase) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_A : Optional[int] = src_lang if src_lang is not None else "en_XX"
_A : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang)
_A : int = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
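        # Per set_src/tgt_lang_special_tokens below, both source and target sequences are
        # built with no prefix and a [</s>, lang_code] suffix; switching the language only
        # swaps the trailing language code.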
@property
def _lowerCamelCase ( self) -> str:
return self._src_lang
@src_lang.setter
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Dict = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]:
_A : List[str] = [self.sep_token_id]
_A : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) -> Dict:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
_A : str = src_lang
_A : Any = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)
_A : Tuple = self.convert_tokens_to_ids(__lowerCamelCase)
_A : Dict = tgt_lang_id
return inputs
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = "en_XX" , __lowerCamelCase = None , __lowerCamelCase = "ro_RO" , **__lowerCamelCase , ) -> BatchEncoding:
_A : Any = src_lang
_A : int = tgt_lang
return super().prepare_seqaseq_batch(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self) -> List[str]:
return self.set_src_lang_special_tokens(self.src_lang)
def _lowerCamelCase ( self) -> List[Any]:
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : int = self.convert_tokens_to_ids(__lowerCamelCase)
_A : int = []
_A : List[str] = [self.eos_token_id, self.cur_lang_code]
_A : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens)
_A : str = self.convert_ids_to_tokens(self.suffix_tokens)
_A : List[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Optional[int] = self.convert_tokens_to_ids(__lowerCamelCase)
_A : List[Any] = []
_A : str = [self.eos_token_id, self.cur_lang_code]
_A : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens)
_A : int = self.convert_ids_to_tokens(self.suffix_tokens)
_A : str = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer.")
if not os.path.isdir(__lowerCamelCase):
logger.error(F"Vocabulary path ({save_directory}) should be a directory.")
return
_A : int = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(__lowerCamelCase):
copyfile(self.vocab_file , __lowerCamelCase)
return (out_vocab_file,)
| 11 | 1 |
def _UpperCAmelCase (UpperCamelCase__ : int ):
_A : Tuple = len(UpperCamelCase__ )
_A : List[str] = sum(UpperCamelCase__ )
_A : Tuple = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
_A : int = True
for i in range(1 , s + 1 ):
_A : Dict = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
_A : Optional[int] = dp[i][j - 1]
if arr[i - 1] <= j:
_A : Any = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
_A : Optional[int] = s - 2 * j
break
return diff
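# Worked example of the intended DP (minimum subset-sum difference): for [1, 6, 11, 5],
# s = 23; the largest reachable subset sum j <= s // 2 is 11 ({6, 5}), so the answer is
# s - 2 * j = 23 - 22 = 1.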
| 11 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'vocab_file': 'vocab.json',
'tokenizer_config_file': 'tokenizer_config.json',
'merges_file': 'merges.txt',
}
lowerCAmelCase__ = {
'vocab_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
),
},
'tokenizer_config_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
),
},
'merges_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
),
},
}
lowerCAmelCase__ = '</w>'
lowerCAmelCase__ = '@@ '
def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] ):
_A : Optional[int] = set()
_A : Optional[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_A : List[Any] = char
return pairs
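# Example of the intended behavior: for the symbol sequence ("l", "o", "w"), the function
# returns {("l", "o"), ("o", "w")} - the candidate BPE merge pairs.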
# Speech2Text2 has no max input length
lowerCAmelCase__ = {'facebook/s2t-wav2vec2-large-en-de': 10_24}
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
def __init__( self , __lowerCamelCase , __lowerCamelCase="<s>" , __lowerCamelCase="<pad>" , __lowerCamelCase="</s>" , __lowerCamelCase="<unk>" , __lowerCamelCase=False , __lowerCamelCase=None , **__lowerCamelCase , ) -> Optional[Any]:
super().__init__(
unk_token=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , pad_token=__lowerCamelCase , do_lower_case=__lowerCamelCase , **__lowerCamelCase , )
_A : Dict = do_lower_case
with open(__lowerCamelCase , encoding="utf-8") as vocab_handle:
_A : Optional[int] = json.load(__lowerCamelCase)
_A : Optional[Any] = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(F"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
_A : Optional[Any] = None
_A : Tuple = None
else:
with open(__lowerCamelCase , encoding="utf-8") as merges_handle:
_A : Optional[int] = merges_handle.read().split("\n")[:-1]
_A : Union[str, Any] = [tuple(merge.split()[:2]) for merge in merges]
_A : Optional[int] = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase))))
_A : List[Any] = {}
@property
def _lowerCamelCase ( self) -> int:
return len(self.decoder)
def _lowerCamelCase ( self) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder)
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
_A : Tuple = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
_A : int = get_pairs(__lowerCamelCase)
if not pairs:
return token
while True:
_A : Any = min(__lowerCamelCase , key=lambda __lowerCamelCase: self.bpe_ranks.get(__lowerCamelCase , float("inf")))
if bigram not in self.bpe_ranks:
break
_A , _A : Optional[int] = bigram
_A : int = []
_A : str = 0
while i < len(__lowerCamelCase):
try:
_A : str = word.index(__lowerCamelCase , __lowerCamelCase)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
_A : str = j
if word[i] == first and i < len(__lowerCamelCase) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
_A : List[str] = tuple(__lowerCamelCase)
_A : List[str] = new_word
if len(__lowerCamelCase) == 1:
break
else:
_A : List[Any] = get_pairs(__lowerCamelCase)
_A : Tuple = " ".join(__lowerCamelCase)
if word == "\n " + BPE_TOKEN_MERGES:
_A : List[str] = "\n" + BPE_TOKEN_MERGES
if word.endswith(__lowerCamelCase):
_A : int = word.replace(__lowerCamelCase , "")
_A : int = word.replace(" " , __lowerCamelCase)
_A : Union[str, Any] = word
return word
def _lowerCamelCase ( self , __lowerCamelCase) -> Optional[Any]:
if self.bpe_ranks is None:
raise ValueError(
"This tokenizer was instantiated without a `merges.txt` file, so"
" that it can only be used for decoding, not for encoding."
"Make sure to provide `merges.txt` file at instantiation to enable "
"encoding.")
if self.do_lower_case:
_A : List[Any] = text.lower()
_A : Optional[int] = text.split()
_A : List[str] = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(__lowerCamelCase).split(" ")))
return split_tokens
def _lowerCamelCase ( self , __lowerCamelCase) -> int:
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token))
def _lowerCamelCase ( self , __lowerCamelCase) -> str:
_A : List[str] = self.decoder.get(__lowerCamelCase , self.unk_token)
return result
def _lowerCamelCase ( self , __lowerCamelCase) -> str:
_A : str = " ".join(__lowerCamelCase)
# make sure @@ tokens are concatenated
_A : int = "".join(string.split(__lowerCamelCase))
return string
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
_A : Any = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
_A : Any = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
with open(__lowerCamelCase , "w" , encoding="utf-8") as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase) + "\n")
_A : Union[str, Any] = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(__lowerCamelCase , "w" , encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!")
_A : Optional[int] = token_index
writer.write(" ".join(__lowerCamelCase) + "\n")
index += 1
return (vocab_file, merges_file)
| 11 | 1 |
from collections.abc import Callable
def _UpperCAmelCase (UpperCamelCase__ : Callable[[float], float] , UpperCamelCase__ : float , UpperCamelCase__ : float ):
_A : float = a
_A : float = b
if function(UpperCamelCase__ ) == 0: # one of the a or b is a root for the function
return a
elif function(UpperCamelCase__ ) == 0:
return b
elif (
function(UpperCamelCase__ ) * function(UpperCamelCase__ ) > 0
    ):  # if neither endpoint is a root and the function values share the same sign,
        # then bisection cannot guarantee a root inside this interval
raise ValueError("could not find root in given interval." )
else:
_A : float = start + (end - start) / 2.0
        while abs(start - mid ) > 10**-7: # until the half-interval width falls below 10^-7
if function(UpperCamelCase__ ) == 0:
return mid
elif function(UpperCamelCase__ ) * function(UpperCamelCase__ ) < 0:
_A : Optional[Any] = mid
else:
_A : List[Any] = mid
_A : Optional[Any] = start + (end - start) / 2.0
return mid
def _UpperCAmelCase (UpperCamelCase__ : float ):
return x**3 - 2 * x - 5
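# f(x) = x**3 - 2*x - 5 has a single real root near x ~ 2.0945515, so the bisection call
# below should print approximately that value.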
if __name__ == "__main__":
print(bisection(f, 1, 10_00))
import doctest
doctest.testmod()
| 11 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = "vit_mae"
def __init__( self , __lowerCamelCase=7_6_8 , __lowerCamelCase=1_2 , __lowerCamelCase=1_2 , __lowerCamelCase=3_0_7_2 , __lowerCamelCase="gelu" , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.0_2 , __lowerCamelCase=1e-12 , __lowerCamelCase=2_2_4 , __lowerCamelCase=1_6 , __lowerCamelCase=3 , __lowerCamelCase=True , __lowerCamelCase=1_6 , __lowerCamelCase=5_1_2 , __lowerCamelCase=8 , __lowerCamelCase=2_0_4_8 , __lowerCamelCase=0.7_5 , __lowerCamelCase=False , **__lowerCamelCase , ) -> int:
super().__init__(**__lowerCamelCase)
_A : int = hidden_size
_A : List[str] = num_hidden_layers
_A : List[Any] = num_attention_heads
_A : Optional[Any] = intermediate_size
_A : Optional[int] = hidden_act
_A : List[Any] = hidden_dropout_prob
_A : List[Any] = attention_probs_dropout_prob
_A : Union[str, Any] = initializer_range
_A : str = layer_norm_eps
_A : Any = image_size
_A : int = patch_size
_A : int = num_channels
_A : Dict = qkv_bias
_A : Tuple = decoder_num_attention_heads
_A : Tuple = decoder_hidden_size
_A : List[str] = decoder_num_hidden_layers
_A : Optional[Any] = decoder_intermediate_size
_A : List[str] = mask_ratio
_A : Union[str, Any] = norm_pix_loss
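        # Note: mask_ratio=0.75 masks 75% of the image patches during MAE pretraining, and
        # the decoder_* fields size the lightweight decoder used only for pixel reconstruction.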
| 11 | 1 |
from __future__ import annotations
# This is the precision for this function, and it can be adjusted.
# It is recommended to keep this number greater than or equal to 10.
lowerCAmelCase__ = 10
def _UpperCAmelCase (UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : list[int] , UpperCamelCase__ : int ):
for i in range(UpperCamelCase__ , UpperCamelCase__ ):
if array[i] == target:
return i
return -1
def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search over a sorted array; returns an index or -1."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search over a sorted array; returns an index or -1."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
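
# Hedged usage sketch (illustrative values only, not part of the original file):
#   ite_ternary_search([1, 3, 5, 7, 9, 11], 7)        -> 3
#   rec_ternary_search(0, 5, [1, 3, 5, 7, 9, 11], 4)  -> -1 (not present)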
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input('Enter numbers separated by comma:\n').strip()
    collection = [int(item.strip()) for item in user_input.split(',')]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input('Enter the number to be found in the list:\n').strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at position: {result1}")
        print(f"Recursive search: {target} found at position: {result2}")
    else:
        print('Not found')
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_speech_encoder_decoder'] = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_speech_encoder_decoder'] = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
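
# Hedged usage sketch (added for illustration): with the lazy module in place,
# the public names can be imported directly, e.g.
#   from transformers import SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel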
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_blip_2': [
'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Blip2Config',
'Blip2QFormerConfig',
'Blip2VisionConfig',
],
'processing_blip_2': ['Blip2Processor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_blip_2'] = [
'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Blip2Model',
'Blip2QFormerModel',
'Blip2PreTrainedModel',
'Blip2ForConditionalGeneration',
'Blip2VisionModel',
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
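
# Hedged usage sketch (added for illustration; the checkpoint id is an assumption):
#   from transformers import Blip2Processor, Blip2ForConditionalGeneration
#   processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#   model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b")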
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
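#
# For illustration (added note): the expansion is a plain cartesian product, roughly:
#
#   >>> import itertools
#   >>> dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]
#   >>> [" ".join(p).strip() for p in itertools.product(*dims)]
#   ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16', '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']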
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")
class Tee:
    """
    A helper class to tee print's output into a file.
    Usage: sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
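
# Usage sketch (added note): main() below routes stdout through Tee so the report
# is printed and also appended to a file, e.g. `sys.stdout = Tee(report_fn)`.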
def get_original_command(max_width=80, full_python_path=False):
    """Return the original command line, quoted and wrapped to max_width for replaying."""
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id, cmd, variation_key, variation, longest_variation_len, target_metric_key, report_metric_keys, repeat_times, output_dir, verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(metrics) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"\nDatetime    : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch       : {torch.__version__}\ncuda        : {torch.version.cuda}\npython      : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n"
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base-cmd", default=None, type=str, required=True, help="Base cmd", )
    parser.add_argument(
        "--variations", default=None, type=str, nargs="+", required=True, help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'", )
    parser.add_argument(
        "--base-variation", default=None, type=str, help="Baseline variation to compare to. if None the minimal target value will be used to compare against", )
    parser.add_argument(
        "--target-metric-key", default=None, type=str, required=True, help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second", )
    parser.add_argument(
        "--report-metric-keys", default="", type=str, help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'", )
    parser.add_argument(
        "--repeat-times", default=1, type=int, help="How many times to re-run each variation - an average will be reported", )
    parser.add_argument(
        "--output_dir", default="output_benchmark", type=str, help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked", )
    parser.add_argument(
        "--verbose", default=False, action="store_true", help="Whether to show the outputs of each run or just the benchmark progress", )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1, cmd, variation_key, variation, longest_variation_len, args.target_metric_key, report_metric_keys, args.repeat_times, output_dir, args.verbose, ) )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
main()
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])

    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(" "))
self.assertTrue(_is_whitespace("\t"))
self.assertTrue(_is_whitespace("\r"))
self.assertTrue(_is_whitespace("\n"))
self.assertTrue(_is_whitespace("\u00A0"))
self.assertFalse(_is_whitespace("A"))
self.assertFalse(_is_whitespace("-"))
    def test_is_control(self):
self.assertTrue(_is_control("\u0005"))
self.assertFalse(_is_control("A"))
self.assertFalse(_is_control(" "))
self.assertFalse(_is_control("\t"))
self.assertFalse(_is_control("\r"))
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation("-"))
self.assertTrue(_is_punctuation("$"))
self.assertTrue(_is_punctuation("`"))
self.assertTrue(_is_punctuation("."))
self.assertFalse(_is_punctuation("A"))
self.assertFalse(_is_punctuation(" "))
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), "Allen"),
((2_1, 2_3), "##NL"),
((2_3, 2_4), "##P"),
((2_5, 3_3), "sentence"),
((3_3, 3_4), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), "allen"),
((2_1, 2_3), "##nl"),
((2_3, 2_4), "##p"),
((2_5, 3_3), "sentence"),
((3_3, 3_4), "."),
((0, 0), tokenizer_r.sep_token),
]
)
                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"]))
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]

    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True)
                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)
                self.assertEqual(input_dict, prepared_input_dict)
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, )
    use_auth_token: bool = field(
        default=False, metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        }, )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}, )
    max_seq_length: Optional[int] = field(
        default=None, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    pad_to_max_length: bool = field(
        default=False, metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        }, )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        }, )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        }, )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that will dynamically pad the inputs for multiple choice received.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", )
        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
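
# Hedged usage sketch (added for illustration; `features` is normally produced by
# the preprocessing step in main() below):
#   collator = DataCollatorForMultipleChoice(tokenizer=tokenizer)
#   batch = collator(features)    # features: list of dicts with 4 choices each
#   batch["input_ids"].shape      # -> (batch_size, 4, seq_len)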
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_swag" , UpperCamelCase__ , UpperCamelCase__ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}")
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension, data_files=data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag", "regular", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"
if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`." )
            max_seq_length = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
# Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences, second_sentences, truncation=True, max_length=max_seq_length, padding="max_length" if data_args.pad_to_max_length else False, )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
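    # Illustration (added note): with 2 examples and 4 endings each, the tokenizer
    # receives 8 (context, ending) pairs; the comprehension above regroups them
    # into chunks of 4, one chunk per original example.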
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )
# Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
from math import pi, sqrt, tan


def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError("surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError("surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius
def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2
def area_triangle_three_sides(side_1: float, side_2: float, side_3: float) -> float:
    """Heron's formula: area of a triangle from its three side lengths."""
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3))
    return area
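
# Quick check of Heron's formula (added note): a 3-4-5 right triangle has area 6,
# e.g. area_triangle_three_sides(3, 4, 5) == 6.0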
def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or equal to three as number of sides")
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as length of a side")
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(f"Rectangle: {area_rectangle(10, 20) = }")
print(f"Square: {area_square(10) = }")
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
print(f"Ellipse: {area_ellipse(10, 20) = }")
print('\nSurface Areas of various geometric shapes: \n')
print(f"Cube: {surface_area_cube(20) = }")
print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
print(f"Hemisphere: {surface_area_hemisphere(20) = }")
print(f"Cone: {surface_area_cone(10, 20) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
print(f"Torus: {surface_area_torus(20, 10) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(f"Square: {area_reg_polygon(4, 10) = }")
print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False")) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "pytorch",
"script": "run_ddp.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "tensorflow",
"script": "run_tf_dist.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
},
])
class MultinodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), encoding="utf-8", check=True, )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=job_name, instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path}, metric_definitions=self.env.metric_definitions, distribution=distribution, py_version="py36", )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 50003
PYTHON_CODE = 50002
@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_base_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
_A : Union[str, Any] = tokenizer.convert_ids_to_tokens(__lowerCamelCase)
self.assertListEqual(
__lowerCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
        end = tokenizer.vocab_size
        tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4 , end)]
        self.assertListEqual(tokens , ["__java__", "__python__", "__en_XX__", "<mask>"])
_A : Tuple = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
_A : Dict = tokenizer(__lowerCamelCase).input_ids
self.assertEqual(
tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase , clean_up_tokenization_spaces=__lowerCamelCase) , __lowerCamelCase , )
def _lowerCamelCase ( self) -> Union[str, Any]:
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB , language_codes="multi" , keep_accents=True)
_A : Optional[Any] = tokenizer.tokenize("This is a test")
self.assertListEqual(__lowerCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCamelCase) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
_A : List[str] = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
__lowerCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
_A : List[Any] = tokenizer.convert_tokens_to_ids(__lowerCamelCase)
self.assertListEqual(
__lowerCamelCase , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
_A : str = tokenizer.convert_ids_to_tokens(__lowerCamelCase)
self.assertListEqual(
__lowerCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
        end = tokenizer.vocab_size
        tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7 , end)]
        self.assertListEqual(
            tokens , ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"])
_A : Tuple = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
_A : Any = tokenizer(__lowerCamelCase).input_ids
self.assertEqual(
tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase , clean_up_tokenization_spaces=__lowerCamelCase) , __lowerCamelCase , )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = "uclanlp/plbart-python-en_XX"
__SCREAMING_SNAKE_CASE = [
"def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
"def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
]
__SCREAMING_SNAKE_CASE = [
"Returns the maximum value of a b c.",
"Sums the values of a b c.",
]
__SCREAMING_SNAKE_CASE = [
134,
5452,
3_3460,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
988,
20,
3_3456,
19,
3_3456,
771,
39,
4258,
889,
3318,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
2471,
2,
PYTHON_CODE,
]
@classmethod
def _lowerCamelCase ( cls) -> str:
        cls.tokenizer : PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name , language_codes="base" , src_lang="python" , tgt_lang="en_XX")
        cls.pad_token_id = 1
return cls
def _lowerCamelCase ( self) -> Union[str, Any]:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] , 5_0_0_0_1)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] , 5_0_0_0_2)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] , 5_0_0_0_3)
def _lowerCamelCase ( self) -> Any:
_A : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __lowerCamelCase)
def _lowerCamelCase ( self) -> Optional[int]:
self.assertIn(__lowerCamelCase , self.tokenizer.all_special_ids)
_A : int = [EN_CODE, 9_0_3_7, 3_3_4_4_2, 5_7, 7_5_2, 1_5_3, 1_4, 5_6, 1_8, 9, 2]
_A : Dict = self.tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase)
_A : Optional[Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__lowerCamelCase)
self.assertEqual(__lowerCamelCase , __lowerCamelCase)
self.assertNotIn(self.tokenizer.eos_token , __lowerCamelCase)
def _lowerCamelCase ( self) -> Optional[int]:
_A : Optional[int] = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 2_0]
self.assertIsInstance(src_text[0] , __lowerCamelCase)
_A : str = 1_0
_A : str = self.tokenizer(__lowerCamelCase , max_length=__lowerCamelCase , truncation=__lowerCamelCase).input_ids[0]
self.assertEqual(ids[-2] , 2)
self.assertEqual(ids[-1] , __lowerCamelCase)
self.assertEqual(len(__lowerCamelCase) , __lowerCamelCase)
def _lowerCamelCase ( self) -> Tuple:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"]) , [5_0_0_0_4, 5_0_0_0_1])
def _lowerCamelCase ( self) -> str:
_A : Any = tempfile.mkdtemp()
_A : Dict = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__lowerCamelCase)
_A : int = PLBartTokenizer.from_pretrained(__lowerCamelCase)
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __lowerCamelCase)
@require_torch
def _lowerCamelCase ( self) -> Optional[Any]:
_A : Optional[int] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__lowerCamelCase , return_tensors="pt")
_A : Union[str, Any] = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id)
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE])
self.assertEqual(batch.decoder_input_ids[1][0] , __lowerCamelCase)
self.assertEqual(batch.decoder_input_ids[1][-1] , 2)
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE])
@require_torch
def _lowerCamelCase ( self) -> Optional[Any]:
_A : List[Any] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=len(self.expected_src_tokens) , return_tensors="pt" , )
_A : Tuple = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id)
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase)
self.assertEqual((2, 2_6) , batch.input_ids.shape)
self.assertEqual((2, 2_6) , batch.attention_mask.shape)
_A : Tuple = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __lowerCamelCase)
self.assertEqual(2 , batch.decoder_input_ids[0, -1]) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [])
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE])
def _lowerCamelCase ( self) -> List[Any]:
_A : List[Any] = self.tokenizer(self.src_text , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=3 , return_tensors="pt")
_A : Dict = self.tokenizer(
text_target=self.tgt_text , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=1_0 , return_tensors="pt")
_A : Any = targets["input_ids"]
_A : Union[str, Any] = shift_tokens_right(__lowerCamelCase , self.tokenizer.pad_token_id)
self.assertEqual(batch.input_ids.shape[1] , 3)
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0)
@require_torch
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : str = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="java")
self.assertEqual(
nested_simplify(__lowerCamelCase) , {
# A, test, EOS, en_XX
"input_ids": [[1_5_0, 2_4_2, 2, 5_0_0_0_3]],
"attention_mask": [[1, 1, 1, 1]],
# java
"forced_bos_token_id": 5_0_0_0_1,
} , )
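# Illustrative usage sketch (not part of the original tests): encoding a
# code-to-text translation pair with PLBartTokenizer. It downloads the public
# "uclanlp/plbart-python-en_XX" checkpoint, so network access is required.
def _example_plbart_translation():
    from transformers import PLBartTokenizer

    tokenizer = PLBartTokenizer.from_pretrained(
        "uclanlp/plbart-python-en_XX", src_lang="python", tgt_lang="en_XX"
    )
    python_phrase = "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])"
    english_phrase = "Returns the maximum value of a b c."
    inputs = tokenizer(python_phrase, text_target=english_phrase, return_tensors="pt")
    # input_ids end with [eos, __python__]; labels end with [eos, __en_XX__]
    return inputs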
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    '''Wraps an OwlViT image processor and a CLIP tokenizer into a single processor.'''
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor , tokenizer)
    def __call__( self , text=None , images=None , query_images=None , padding="max_length" , return_tensors="np" , **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none.")
        if text is not None:
            if isinstance(text , str) or (isinstance(text , List) and not isinstance(text[0] , List)):
                encodings = [self.tokenizer(text , padding=padding , return_tensors=return_tensors , **kwargs)]
            elif isinstance(text , List) and isinstance(text[0] , List):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))
                    encoding = self.tokenizer(t , padding=padding , return_tensors=return_tensors , **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")
if return_tensors == "np":
_A : Tuple = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0)
_A : Optional[Any] = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0)
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
_A : Optional[int] = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0)
_A : Optional[int] = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0)
elif return_tensors == "pt" and is_torch_available():
import torch
_A : Optional[Any] = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0)
_A : Union[str, Any] = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0)
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
_A : Any = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0)
_A : Tuple = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0)
else:
raise ValueError("Target return tensor type could not be returned")
_A : Optional[Any] = BatchEncoding()
_A : Tuple = input_ids
_A : Dict = attention_mask
if query_images is not None:
_A : Optional[Any] = BatchEncoding()
_A : List[str] = self.image_processor(
__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase).pixel_values
_A : Union[str, Any] = query_pixel_values
if images is not None:
_A : int = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)
if text is not None and images is not None:
_A : Tuple = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
_A : int = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__lowerCamelCase) , tensor_type=__lowerCamelCase)
    def post_process( self , *args , **kwargs):
        return self.image_processor.post_process(*args , **kwargs)
    def post_process_object_detection( self , *args , **kwargs):
        return self.image_processor.post_process_object_detection(*args , **kwargs)
    def post_process_image_guided_detection( self , *args , **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args , **kwargs)
    def batch_decode( self , *args , **kwargs):
        return self.tokenizer.batch_decode(*args , **kwargs)
    def decode( self , *args , **kwargs):
        return self.tokenizer.decode(*args , **kwargs)
    @property
    def feature_extractor_class( self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
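# Illustrative usage sketch (not part of the original module): running the
# processor on one placeholder image with two text queries. The checkpoint
# name is the public "google/owlvit-base-patch32"; downloading it requires
# network access.
def _example_owlvit_processor():
    from PIL import Image

    processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
    image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
    inputs = processor(
        text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt"
    )
    return inputs  # contains input_ids, attention_mask and pixel_values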
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'microsoft/git-base': 'https://huggingface.co/microsoft/git-base/resolve/main/config.json',
}
class GitVisionConfig(PretrainedConfig):
    '''Configuration for the vision encoder of a GIT model.'''
    model_type = "git_vision_model"
    def __init__( self , hidden_size=7_6_8 , intermediate_size=3_0_7_2 , num_hidden_layers=1_2 , num_attention_heads=1_2 , num_channels=3 , image_size=2_2_4 , patch_size=1_6 , hidden_act="quick_gelu" , layer_norm_eps=1e-5 , attention_dropout=0.0 , initializer_range=0.0_2 , **kwargs , ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs)
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                F"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict , **kwargs)
class GitConfig(PretrainedConfig):
    '''Configuration for a GIT model (vision encoder plus text decoder).'''
    model_type = "git"
    def __init__( self , vision_config=None , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=6 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1_0_2_4 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , tie_word_embeddings=False , bos_token_id=1_0_1 , eos_token_id=1_0_2 , num_image_with_embedding=None , **kwargs , ):
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , pad_token_id=pad_token_id , **kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")
        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    def to_dict( self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
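# Illustrative usage sketch (not part of the original module): composing a
# GitConfig with a customized vision tower. All values below are arbitrary.
def _example_git_config():
    vision_config = GitVisionConfig(image_size=224, patch_size=16, hidden_size=768)
    config = GitConfig(vision_config=vision_config.to_dict(), num_hidden_layers=6)
    # the serialized dict nests the vision configuration under "vision_config"
    return config.to_dict()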
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"]):
_A : Optional[int] = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(__lowerCamelCase)
def _lowerCamelCase ( self) -> int:
_A : Optional[int] = "sshleifer/tiny-gpt2"
_A : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : List[str] = PyTorchBenchmark(__lowerCamelCase)
_A : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> Dict:
_A : int = "sgugger/tiny-distilbert-classification"
_A : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , only_pretrain_model=__lowerCamelCase , )
_A : Dict = PyTorchBenchmark(__lowerCamelCase)
_A : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> Optional[Any]:
_A : Tuple = "sshleifer/tiny-gpt2"
_A : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , torchscript=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Union[str, Any] = PyTorchBenchmark(__lowerCamelCase)
_A : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision")
    def test_inference_fp16( self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , fp16=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> Any:
_A : Union[str, Any] = "sshleifer/tiny-gpt2"
_A : Any = AutoConfig.from_pretrained(__lowerCamelCase)
# set architectures equal to `None`
_A : Dict = None
_A : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Union[str, Any] = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> int:
_A : List[Any] = "sshleifer/tiny-gpt2"
_A : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Optional[Any] = PyTorchBenchmark(__lowerCamelCase)
_A : int = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
@unittest.skipIf(torch_device == "cpu" , "Can't do half precision")
    def test_train_no_configs_fp16( self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=True , inference=False , sequence_lengths=[8] , batch_sizes=[1] , fp16=True , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
def _lowerCamelCase ( self) -> str:
_A : List[str] = "sshleifer/tiny-gpt2"
_A : Union[str, Any] = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Optional[Any] = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> int:
_A : Tuple = "sshleifer/tinier_bart"
_A : Optional[Any] = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Dict = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> str:
_A : List[Any] = "sshleifer/tiny-gpt2"
_A : Optional[Any] = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : List[str] = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def _lowerCamelCase ( self) -> int:
_A : int = "sshleifer/tinier_bart"
_A : str = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Tuple = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def _lowerCamelCase ( self) -> Dict:
_A : List[str] = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
_A : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , save_to_csv=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__lowerCamelCase , "inf_time.csv") , train_memory_csv_file=os.path.join(__lowerCamelCase , "train_mem.csv") , inference_memory_csv_file=os.path.join(__lowerCamelCase , "inf_mem.csv") , train_time_csv_file=os.path.join(__lowerCamelCase , "train_time.csv") , env_info_csv_file=os.path.join(__lowerCamelCase , "env.csv") , multi_process=__lowerCamelCase , )
_A : Tuple = PyTorchBenchmark(__lowerCamelCase)
benchmark.run()
self.assertTrue(Path(os.path.join(__lowerCamelCase , "inf_time.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "train_time.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "inf_mem.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "train_mem.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "env.csv")).exists())
def _lowerCamelCase ( self) -> int:
_A : Dict = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(__lowerCamelCase):
self.assertTrue(hasattr(__lowerCamelCase , "sequential"))
self.assertTrue(hasattr(__lowerCamelCase , "cumulative"))
self.assertTrue(hasattr(__lowerCamelCase , "current"))
self.assertTrue(hasattr(__lowerCamelCase , "total"))
with tempfile.TemporaryDirectory() as tmp_dir:
_A : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__lowerCamelCase , "log.txt") , log_print=__lowerCamelCase , trace_memory_line_by_line=__lowerCamelCase , multi_process=__lowerCamelCase , )
_A : Optional[int] = PyTorchBenchmark(__lowerCamelCase)
_A : Dict = benchmark.run()
_check_summary_is_not_empty(result.inference_summary)
_check_summary_is_not_empty(result.train_summary)
self.assertTrue(Path(os.path.join(__lowerCamelCase , "log.txt")).exists())
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'nielsr/canine-s': 20_48,
}
# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 1_11_41_12
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer):
    '''Character-level tokenizer: token ids are raw Unicode code points.'''
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , bos_token=chr(CLS) , eos_token=chr(SEP) , sep_token=chr(SEP) , cls_token=chr(CLS) , pad_token=chr(PAD) , mask_token=chr(MASK) , add_prefix_space=False , model_max_length=2_0_4_8 , **kwargs , ):
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False) if isinstance(bos_token , str) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False) if isinstance(eos_token , str) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False) if isinstance(sep_token , str) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False) if isinstance(cls_token , str) else cls_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False) if isinstance(pad_token , str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False) if isinstance(mask_token , str) else mask_token
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , model_max_length=model_max_length , **kwargs , )
        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)
    @property
    def vocab_size( self) -> int:
        return self._unicode_vocab_size
    def _tokenize( self , text) -> List[str]:
        return list(text)
    def _convert_token_to_id( self , token) -> int:
        try:
            return ord(token)
        except TypeError:
            raise ValueError(F"invalid token: '{token}'")
    def _convert_id_to_token( self , index) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(F"invalid id: {index}")
    def convert_tokens_to_string( self , tokens):
        return "".join(tokens)
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens)
        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result
    def save_vocabulary( self , save_directory , filename_prefix = None):
        return ()
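# Illustrative sketch (not part of the original module): CANINE has no learned
# vocabulary -- token ids are raw Unicode code points, plus the special
# codepoints defined above. Plain Python is enough to show the mapping.
def _example_canine_ids():
    text = "héllo"
    ids = [CLS] + [ord(ch) for ch in text] + [SEP]
    decoded = "".join(chr(i) for i in ids[1:-1])
    assert decoded == text
    return ids  # [57344, 104, 233, 108, 108, 111, 57345]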
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase__ = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
),
},
}
lowerCAmelCase__ = {
'facebook/nllb-large-en-ro': 10_24,
'facebook/nllb-200-distilled-600M': 10_24,
}
# fmt: off
lowerCAmelCase__ = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class NllbTokenizerFast(PreTrainedTokenizerFast):
    '''Fast NLLB tokenizer (backed by HuggingFace tokenizers).'''
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , src_lang=None , tgt_lang=None , additional_special_tokens=None , legacy_behaviour=False , **kwargs , ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False) if isinstance(mask_token , str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , legacy_behaviour=legacy_behaviour , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens])
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang( self) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang( self , new_src_lang) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs( self , raw_inputs , return_tensors , src_lang , tgt_lang , **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch( self , src_texts , src_lang = "eng_Latn" , tgt_texts = None , tgt_lang = "fra_Latn" , **kwargs , ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs)
    def _switch_to_input_mode( self):
        return self.set_src_lang_special_tokens(self.src_lang)
    def _switch_to_target_mode( self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens( self , src_lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
    def set_tgt_lang_special_tokens( self , lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
    def save_vocabulary( self , save_directory , filename_prefix = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file , out_vocab_file)
        return (out_vocab_file,)
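# Illustrative usage sketch (not part of the original module): translating
# English to French with the public NLLB checkpoint. Downloading the
# "facebook/nllb-200-distilled-600M" files requires network access.
def _example_nllb_encode():
    tokenizer = NllbTokenizerFast.from_pretrained(
        "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
    )
    batch = tokenizer("Hello, world!", text_target="Bonjour le monde !", return_tensors="pt")
    # with the default (non-legacy) behaviour, input_ids start with the
    # eng_Latn language code and labels start with fra_Latn
    return batch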
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
@parameterized.expand([(None,), ("foo.json",)])
def _lowerCamelCase ( self , __lowerCamelCase) -> List[str]:
_A : str = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCamelCase , config_name=__lowerCamelCase)
_A : Tuple = GenerationConfig.from_pretrained(__lowerCamelCase , config_name=__lowerCamelCase)
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , __lowerCamelCase)
self.assertEqual(loaded_config.temperature , 0.7)
self.assertEqual(loaded_config.length_penalty , 1.0)
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]])
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 5_0)
self.assertEqual(loaded_config.max_length , 2_0)
self.assertEqual(loaded_config.max_time , __lowerCamelCase)
def _lowerCamelCase ( self) -> Optional[int]:
_A : Optional[int] = AutoConfig.from_pretrained("gpt2")
_A : int = GenerationConfig.from_model_config(__lowerCamelCase)
_A : List[Any] = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase)
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id)
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id)
def _lowerCamelCase ( self) -> Optional[Any]:
_A : Optional[Any] = GenerationConfig()
_A : List[Any] = {
"max_new_tokens": 1_0_2_4,
"foo": "bar",
}
_A : List[str] = copy.deepcopy(__lowerCamelCase)
_A : int = generation_config.update(**__lowerCamelCase)
# update_kwargs was not modified (no side effects)
self.assertEqual(__lowerCamelCase , __lowerCamelCase)
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_0_2_4)
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(__lowerCamelCase , {"foo": "bar"})
def _lowerCamelCase ( self) -> Any:
_A : int = GenerationConfig()
_A : int = "bar"
with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
generation_config.save_pretrained(__lowerCamelCase)
_A : Any = GenerationConfig.from_pretrained(__lowerCamelCase)
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , "bar")
_A : Optional[Any] = GenerationConfig.from_model_config(__lowerCamelCase)
assert not hasattr(__lowerCamelCase , "foo") # no new kwargs should be initialized if from config
def _lowerCamelCase ( self) -> List[str]:
_A : Union[str, Any] = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0)
self.assertEqual(default_config.do_sample , __lowerCamelCase)
self.assertEqual(default_config.num_beams , 1)
_A : Optional[int] = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7)
self.assertEqual(config.do_sample , __lowerCamelCase)
self.assertEqual(config.num_beams , 1)
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCamelCase)
_A : Optional[int] = GenerationConfig.from_pretrained(__lowerCamelCase , temperature=1.0)
self.assertEqual(loaded_config.temperature , 1.0)
self.assertEqual(loaded_config.do_sample , __lowerCamelCase)
self.assertEqual(loaded_config.num_beams , 1) # default value
@is_staging_test
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
@classmethod
def _lowerCamelCase ( cls) -> Optional[int]:
_A : Dict = TOKEN
HfFolder.save_token(__lowerCamelCase)
@classmethod
def _lowerCamelCase ( cls) -> List[Any]:
try:
delete_repo(token=cls._token , repo_id="test-generation-config")
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-generation-config-org")
except HTTPError:
pass
def _lowerCamelCase ( self) -> Any:
_A : Optional[int] = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("test-generation-config" , use_auth_token=self._token)
_A : Union[str, Any] = GenerationConfig.from_pretrained(F"{USER}/test-generation-config")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
# Reset repo
delete_repo(token=self._token , repo_id="test-generation-config")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__lowerCamelCase , repo_id="test-generation-config" , push_to_hub=__lowerCamelCase , use_auth_token=self._token)
_A : Optional[Any] = GenerationConfig.from_pretrained(F"{USER}/test-generation-config")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : Union[str, Any] = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("valid_org/test-generation-config-org" , use_auth_token=self._token)
_A : int = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-generation-config-org")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__lowerCamelCase , repo_id="valid_org/test-generation-config-org" , push_to_hub=__lowerCamelCase , use_auth_token=self._token)
_A : Optional[int] = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
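# Illustrative usage sketch (not part of the original tests): the basic
# save/load round trip exercised above, with arbitrary parameter values.
def _example_generation_config_roundtrip():
    config = GenerationConfig(do_sample=True, temperature=0.7, top_k=10)
    with tempfile.TemporaryDirectory() as tmp_dir:
        config.save_pretrained(tmp_dir)
        loaded = GenerationConfig.from_pretrained(tmp_dir)
    assert loaded.temperature == 0.7
    return loaded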
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
lowerCAmelCase__ = logging.get_logger(__name__)
_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(formatter_cls: type, format_type: Optional[str], aliases: Optional[List[str]] = None, ):
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})" )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type] ):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})" )
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter(unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None ):
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type] ):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
    _torch_error = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
_register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
    _tf_error = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
_register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
    _jax_error = ValueError('JAX needs to be installed to be able to return JAX arrays.')
_register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def get_format_type_from_alias(format_type: Optional[str] ):
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
def get_formatter(format_type: Optional[str], **format_kwargs ):
    format_type = get_format_type_from_alias(format_type )
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs )
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(t for t in _FORMAT_TYPES.keys() if t is not None )}, but got '{format_type}'" )
import argparse
import os
import re
PATH_TO_TRANSFORMERS = 'src/transformers'
# Pattern that looks at the indentation in a line.
lowerCAmelCase__ = re.compile(R'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
lowerCAmelCase__ = re.compile(R'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowerCAmelCase__ = re.compile(R'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
lowerCAmelCase__ = re.compile(R'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowerCAmelCase__ = re.compile(R'\[([^\]]+)\]')
def get_indent(line: str ):
    search = _re_indent.search(line )
    return "" if search is None else search.groups()[0]
def _UpperCAmelCase (UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int]="" , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : int=None ):
_A : List[str] = 0
_A : Union[str, Any] = code.split("\n" )
if start_prompt is not None:
while not lines[index].startswith(UpperCamelCase__ ):
index += 1
_A : Tuple = ["\n".join(lines[:index] )]
else:
_A : List[str] = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
_A : int = [lines[index]]
index += 1
while index < len(UpperCamelCase__ ) and (end_prompt is None or not lines[index].startswith(UpperCamelCase__ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(UpperCamelCase__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ):
current_block.append(lines[index] )
blocks.append("\n".join(UpperCamelCase__ ) )
if index < len(UpperCamelCase__ ) - 1:
_A : Dict = [lines[index + 1]]
index += 1
else:
_A : Optional[Any] = []
else:
blocks.append("\n".join(UpperCamelCase__ ) )
_A : List[Any] = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(UpperCamelCase__ ) > 0:
blocks.append("\n".join(UpperCamelCase__ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(UpperCamelCase__ ):
blocks.append("\n".join(lines[index:] ) )
return blocks
def ignore_underscore(key ):
    def _inner(x ):
        return key(x).lower().replace("_" , "" )
    return _inner
def sort_objects(objects , key=None ):
    # If no key is provided, we use a noop.
    def noop(x ):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key )
    return sorted(constants , key=key1 ) + sorted(classes , key=key1 ) + sorted(functions , key=key1 )
def _UpperCAmelCase (UpperCamelCase__ : List[Any] ):
# This inner function sort imports between [ ].
def _replace(UpperCamelCase__ : int ):
_A : Any = match.groups()[0]
if "," not in imports:
return f"[{imports}]"
_A : Tuple = [part.strip().replace("\"" , "" ) for part in imports.split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_A : str = keys[:-1]
return "[" + ", ".join([f"\"{k}\"" for k in sort_objects(UpperCamelCase__ )] ) + "]"
_A : Optional[int] = import_statement.split("\n" )
if len(UpperCamelCase__ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
_A : List[str] = 2 if lines[1].strip() == "[" else 1
_A : Union[str, Any] = [(i, _re_strip_line.search(UpperCamelCase__ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
_A : str = sort_objects(UpperCamelCase__ , key=lambda UpperCamelCase__ : x[1] )
_A : Optional[Any] = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(UpperCamelCase__ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
_A : List[str] = _re_bracket_content.sub(_replace , lines[1] )
else:
_A : Tuple = [part.strip().replace("\"" , "" ) for part in lines[1].split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_A : str = keys[:-1]
_A : Any = get_indent(lines[1] ) + ", ".join([f"\"{k}\"" for k in sort_objects(UpperCamelCase__ )] )
return "\n".join(UpperCamelCase__ )
else:
# Finally we have to deal with imports fitting on one line
_A : Dict = _re_bracket_content.sub(_replace , UpperCamelCase__ )
return import_statement
def sort_imports(file , check_only=True ):
    with open(file , encoding="utf-8" ) as f:
        code = f.read()
    if "_import_structure" not in code:
        return
# Blocks of indent level 0
_A : Any = split_code_in_indented_blocks(
UpperCamelCase__ , start_prompt="_import_structure = {" , end_prompt="if TYPE_CHECKING:" )
# We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(UpperCamelCase__ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
_A : Dict = main_blocks[block_idx]
_A : List[str] = block.split("\n" )
# Get to the start of the imports.
_A : List[str] = 0
while line_idx < len(UpperCamelCase__ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
_A : Optional[Any] = len(UpperCamelCase__ )
else:
line_idx += 1
if line_idx >= len(UpperCamelCase__ ):
continue
# Ignore beginning and last line: they don't contain anything.
_A : Union[str, Any] = "\n".join(block_lines[line_idx:-1] )
_A : Optional[Any] = get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
_A : Tuple = split_code_in_indented_blocks(UpperCamelCase__ , indent_level=UpperCamelCase__ )
# We have two categories of import key: list or _import_structure[key].append/extend
_A : Optional[int] = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
_A : str = [(pattern.search(UpperCamelCase__ ).groups()[0] if pattern.search(UpperCamelCase__ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
_A : int = [(i, key) for i, key in enumerate(UpperCamelCase__ ) if key is not None]
_A : List[Any] = [x[0] for x in sorted(UpperCamelCase__ , key=lambda UpperCamelCase__ : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
_A : str = 0
_A : List[str] = []
for i in range(len(UpperCamelCase__ ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
_A : List[str] = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(UpperCamelCase__ )
count += 1
# And we put our main block back together with its first and last line.
_A : Optional[Any] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(UpperCamelCase__ ):
if check_only:
return True
else:
print(f"Overwriting {file}." )
with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as f:
f.write("\n".join(UpperCamelCase__ ) )
def sort_imports_in_all_inits(check_only=True):
    failures = []
    # PATH_TO_TRANSFORMERS: root-path constant assumed to be defined earlier in this file.
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only)
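
With the helper functions defined earlier in this file, the ordering rule can be checked on a toy list; the expected output assumes `ignore_underscore` compares names with leading underscores stripped:

print(sort_objects(["run", "CONFIG", "Model", "_helper"]))
# expected: ['CONFIG', 'Model', '_helper', 'run']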
| 11 |
def method_a(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary, steps)
    print(f"y = {y}")
if __name__ == "__main__":
main()
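
As a cross-check, here is a compact vectorized version of the same rule (a sketch, not part of the original script); for f(x) = x^2 on [0, 1] it returns 0.335 against the exact value 1/3:

import numpy as np

def trapezoid(g, a, b, steps):
    x = np.linspace(a, b, steps + 1)
    y = g(x)
    h = (b - a) / steps
    return h * (y[0] / 2 + y[1:-1].sum() + y[-1] / 2)

print(trapezoid(lambda x: x * x, 0.0, 1.0, 10))  # 0.335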
| 11 | 1 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
('audio-spectrogram-transformer', 'ASTFeatureExtractor'),
('beit', 'BeitFeatureExtractor'),
('chinese_clip', 'ChineseCLIPFeatureExtractor'),
('clap', 'ClapFeatureExtractor'),
('clip', 'CLIPFeatureExtractor'),
('clipseg', 'ViTFeatureExtractor'),
('conditional_detr', 'ConditionalDetrFeatureExtractor'),
('convnext', 'ConvNextFeatureExtractor'),
('cvt', 'ConvNextFeatureExtractor'),
('data2vec-audio', 'Wav2Vec2FeatureExtractor'),
('data2vec-vision', 'BeitFeatureExtractor'),
('deformable_detr', 'DeformableDetrFeatureExtractor'),
('deit', 'DeiTFeatureExtractor'),
('detr', 'DetrFeatureExtractor'),
('dinat', 'ViTFeatureExtractor'),
('donut-swin', 'DonutFeatureExtractor'),
('dpt', 'DPTFeatureExtractor'),
('encodec', 'EncodecFeatureExtractor'),
('flava', 'FlavaFeatureExtractor'),
('glpn', 'GLPNFeatureExtractor'),
('groupvit', 'CLIPFeatureExtractor'),
('hubert', 'Wav2Vec2FeatureExtractor'),
('imagegpt', 'ImageGPTFeatureExtractor'),
('layoutlmv2', 'LayoutLMv2FeatureExtractor'),
('layoutlmv3', 'LayoutLMv3FeatureExtractor'),
('levit', 'LevitFeatureExtractor'),
('maskformer', 'MaskFormerFeatureExtractor'),
('mctct', 'MCTCTFeatureExtractor'),
('mobilenet_v1', 'MobileNetV1FeatureExtractor'),
('mobilenet_v2', 'MobileNetV2FeatureExtractor'),
('mobilevit', 'MobileViTFeatureExtractor'),
('nat', 'ViTFeatureExtractor'),
('owlvit', 'OwlViTFeatureExtractor'),
('perceiver', 'PerceiverFeatureExtractor'),
('poolformer', 'PoolFormerFeatureExtractor'),
('regnet', 'ConvNextFeatureExtractor'),
('resnet', 'ConvNextFeatureExtractor'),
('segformer', 'SegformerFeatureExtractor'),
('sew', 'Wav2Vec2FeatureExtractor'),
('sew-d', 'Wav2Vec2FeatureExtractor'),
('speech_to_text', 'Speech2TextFeatureExtractor'),
('speecht5', 'SpeechT5FeatureExtractor'),
('swiftformer', 'ViTFeatureExtractor'),
('swin', 'ViTFeatureExtractor'),
('swinv2', 'ViTFeatureExtractor'),
('table-transformer', 'DetrFeatureExtractor'),
('timesformer', 'VideoMAEFeatureExtractor'),
('tvlt', 'TvltFeatureExtractor'),
('unispeech', 'Wav2Vec2FeatureExtractor'),
('unispeech-sat', 'Wav2Vec2FeatureExtractor'),
('van', 'ConvNextFeatureExtractor'),
('videomae', 'VideoMAEFeatureExtractor'),
('vilt', 'ViltFeatureExtractor'),
('vit', 'ViTFeatureExtractor'),
('vit_mae', 'ViTFeatureExtractor'),
('vit_msn', 'ViTFeatureExtractor'),
('wav2vec2', 'Wav2Vec2FeatureExtractor'),
('wav2vec2-conformer', 'Wav2Vec2FeatureExtractor'),
('wavlm', 'Wav2Vec2FeatureExtractor'),
('whisper', 'WhisperFeatureExtractor'),
('xclip', 'CLIPFeatureExtractor'),
('yolos', 'YolosFeatureExtractor'),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead.")
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self) -> Any:
raise EnvironmentError(
"AutoFeatureExtractor is designed to be instantiated "
"using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.")
@classmethod
@replace_list_option_in_docstrings(__lowerCamelCase)
def _lowerCamelCase ( cls , __lowerCamelCase , **__lowerCamelCase) -> Optional[int]:
_A : Optional[int] = kwargs.pop("config" , __lowerCamelCase)
_A : Tuple = kwargs.pop("trust_remote_code" , __lowerCamelCase)
_A : List[Any] = True
_A , _A : Optional[int] = FeatureExtractionMixin.get_feature_extractor_dict(__lowerCamelCase , **__lowerCamelCase)
_A : List[Any] = config_dict.get("feature_extractor_type" , __lowerCamelCase)
_A : int = None
if "AutoFeatureExtractor" in config_dict.get("auto_map" , {}):
_A : Any = config_dict["auto_map"]["AutoFeatureExtractor"]
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(__lowerCamelCase , __lowerCamelCase):
_A : str = AutoConfig.from_pretrained(__lowerCamelCase , **__lowerCamelCase)
            # It could be in `config.feature_extractor_type`
_A : List[Any] = getattr(__lowerCamelCase , "feature_extractor_type" , __lowerCamelCase)
if hasattr(__lowerCamelCase , "auto_map") and "AutoFeatureExtractor" in config.auto_map:
_A : Optional[int] = config.auto_map["AutoFeatureExtractor"]
if feature_extractor_class is not None:
_A : List[Any] = feature_extractor_class_from_name(__lowerCamelCase)
_A : Any = feature_extractor_auto_map is not None
_A : Optional[int] = feature_extractor_class is not None or type(__lowerCamelCase) in FEATURE_EXTRACTOR_MAPPING
_A : int = resolve_trust_remote_code(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
if has_remote_code and trust_remote_code:
_A : List[Any] = get_class_from_dynamic_module(
__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase)
_A : List[str] = kwargs.pop("code_revision" , __lowerCamelCase)
if os.path.isdir(__lowerCamelCase):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(__lowerCamelCase , **__lowerCamelCase)
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(__lowerCamelCase , **__lowerCamelCase)
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(__lowerCamelCase) in FEATURE_EXTRACTOR_MAPPING:
_A : Dict = FEATURE_EXTRACTOR_MAPPING[type(__lowerCamelCase)]
return feature_extractor_class.from_dict(__lowerCamelCase , **__lowerCamelCase)
raise ValueError(
F"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
F"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
F"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}")
@staticmethod
def _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase) -> Union[str, Any]:
FEATURE_EXTRACTOR_MAPPING.register(__lowerCamelCase , __lowerCamelCase)
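
Typical usage of the auto class above; the checkpoint id is illustrative, and the resolved class follows the `wav2vec2` entry in the mapping:

from transformers import AutoFeatureExtractor

extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
print(type(extractor).__name__)  # Wav2Vec2FeatureExtractor, per the mapping above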
| 11 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
@parameterized.expand([(None,), ("foo.json",)])
def _lowerCamelCase ( self , __lowerCamelCase) -> List[str]:
_A : str = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCamelCase , config_name=__lowerCamelCase)
_A : Tuple = GenerationConfig.from_pretrained(__lowerCamelCase , config_name=__lowerCamelCase)
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , __lowerCamelCase)
self.assertEqual(loaded_config.temperature , 0.7)
self.assertEqual(loaded_config.length_penalty , 1.0)
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]])
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 5_0)
self.assertEqual(loaded_config.max_length , 2_0)
self.assertEqual(loaded_config.max_time , __lowerCamelCase)
def _lowerCamelCase ( self) -> Optional[int]:
_A : Optional[int] = AutoConfig.from_pretrained("gpt2")
_A : int = GenerationConfig.from_model_config(__lowerCamelCase)
_A : List[Any] = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase)
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id)
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id)
def _lowerCamelCase ( self) -> Optional[Any]:
_A : Optional[Any] = GenerationConfig()
_A : List[Any] = {
"max_new_tokens": 1_0_2_4,
"foo": "bar",
}
_A : List[str] = copy.deepcopy(__lowerCamelCase)
_A : int = generation_config.update(**__lowerCamelCase)
# update_kwargs was not modified (no side effects)
self.assertEqual(__lowerCamelCase , __lowerCamelCase)
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_0_2_4)
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(__lowerCamelCase , {"foo": "bar"})
def _lowerCamelCase ( self) -> Any:
_A : int = GenerationConfig()
_A : int = "bar"
with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
generation_config.save_pretrained(__lowerCamelCase)
_A : Any = GenerationConfig.from_pretrained(__lowerCamelCase)
        # the custom attribute survives the save/load round trip
self.assertEqual(new_config.foo , "bar")
_A : Optional[Any] = GenerationConfig.from_model_config(__lowerCamelCase)
assert not hasattr(__lowerCamelCase , "foo") # no new kwargs should be initialized if from config
def _lowerCamelCase ( self) -> List[str]:
_A : Union[str, Any] = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0)
self.assertEqual(default_config.do_sample , __lowerCamelCase)
self.assertEqual(default_config.num_beams , 1)
_A : Optional[int] = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7)
self.assertEqual(config.do_sample , __lowerCamelCase)
self.assertEqual(config.num_beams , 1)
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCamelCase)
_A : Optional[int] = GenerationConfig.from_pretrained(__lowerCamelCase , temperature=1.0)
self.assertEqual(loaded_config.temperature , 1.0)
self.assertEqual(loaded_config.do_sample , __lowerCamelCase)
self.assertEqual(loaded_config.num_beams , 1) # default value
@is_staging_test
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
@classmethod
def _lowerCamelCase ( cls) -> Optional[int]:
_A : Dict = TOKEN
HfFolder.save_token(__lowerCamelCase)
@classmethod
def _lowerCamelCase ( cls) -> List[Any]:
try:
delete_repo(token=cls._token , repo_id="test-generation-config")
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-generation-config-org")
except HTTPError:
pass
def _lowerCamelCase ( self) -> Any:
_A : Optional[int] = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("test-generation-config" , use_auth_token=self._token)
_A : Union[str, Any] = GenerationConfig.from_pretrained(F"{USER}/test-generation-config")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
# Reset repo
delete_repo(token=self._token , repo_id="test-generation-config")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__lowerCamelCase , repo_id="test-generation-config" , push_to_hub=__lowerCamelCase , use_auth_token=self._token)
_A : Optional[Any] = GenerationConfig.from_pretrained(F"{USER}/test-generation-config")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : Union[str, Any] = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("valid_org/test-generation-config-org" , use_auth_token=self._token)
_A : int = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-generation-config-org")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__lowerCamelCase , repo_id="valid_org/test-generation-config-org" , push_to_hub=__lowerCamelCase , use_auth_token=self._token)
_A : Optional[int] = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
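
A minimal save/load round trip mirroring the first test above, using the public GenerationConfig API:

import tempfile
from transformers import GenerationConfig

config = GenerationConfig(do_sample=True, temperature=0.7)
with tempfile.TemporaryDirectory() as tmp_dir:
    config.save_pretrained(tmp_dir)
    loaded = GenerationConfig.from_pretrained(tmp_dir)
assert loaded.do_sample is True and loaded.temperature == 0.7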
| 11 | 1 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
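
The converter can also be called directly from Python; the three paths below are placeholders, in the positional order used by the CLI entry point above:

convert_rembert_tf_checkpoint_to_pytorch(
    "path/to/model.ckpt",            # TensorFlow checkpoint (placeholder)
    "path/to/rembert_config.json",   # RemBERT config json (placeholder)
    "path/to/pytorch_model.bin",     # output PyTorch weights (placeholder)
)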
| 11 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=0.2 , __lowerCamelCase=0.2) -> str:
_A : Optional[int] = bp_numa
_A : Dict = bp_numa
_A : Tuple = bp_numa
_A : List[str] = conva_get[:2]
_A : Tuple = conva_get[2]
_A : Optional[int] = size_pa
_A : Optional[Any] = rate_w
_A : Optional[Any] = rate_t
_A : Union[str, Any] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0]) + 0.5)
for i in range(self.conva[1])
]
_A : int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
_A : Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
_A : Any = -2 * np.random.rand(self.conva[1]) + 1
_A : Optional[int] = -2 * np.random.rand(self.num_bpa) + 1
_A : Optional[Any] = -2 * np.random.rand(self.num_bpa) + 1
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
# save model dict with pickle
_A : Dict = {
"num_bp1": self.num_bpa,
"num_bp2": self.num_bpa,
"num_bp3": self.num_bpa,
"conv1": self.conva,
"step_conv1": self.step_conva,
"size_pooling1": self.size_poolinga,
"rate_weight": self.rate_weight,
"rate_thre": self.rate_thre,
"w_conv1": self.w_conva,
"wkj": self.wkj,
"vji": self.vji,
"thre_conv1": self.thre_conva,
"thre_bp2": self.thre_bpa,
"thre_bp3": self.thre_bpa,
}
with open(__lowerCamelCase , "wb") as f:
pickle.dump(__lowerCamelCase , __lowerCamelCase)
print(F"Model saved: {save_path}")
@classmethod
def _lowerCamelCase ( cls , __lowerCamelCase) -> Any:
# read saved model
with open(__lowerCamelCase , "rb") as f:
_A : Any = pickle.load(__lowerCamelCase) # noqa: S301
_A : Optional[int] = model_dic.get("conv1")
conv_get.append(model_dic.get("step_conv1"))
_A : str = model_dic.get("size_pooling1")
_A : List[str] = model_dic.get("num_bp1")
_A : Union[str, Any] = model_dic.get("num_bp2")
_A : List[Any] = model_dic.get("num_bp3")
_A : Dict = model_dic.get("rate_weight")
_A : List[Any] = model_dic.get("rate_thre")
# create model instance
_A : str = CNN(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
# modify model parameter
_A : List[Any] = model_dic.get("w_conv1")
_A : Union[str, Any] = model_dic.get("wkj")
_A : str = model_dic.get("vji")
_A : List[str] = model_dic.get("thre_conv1")
_A : Optional[Any] = model_dic.get("thre_bp2")
_A : Dict = model_dic.get("thre_bp3")
return conv_ins
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
return 1 / (1 + np.exp(-1 * x))
def _lowerCamelCase ( self , __lowerCamelCase) -> Union[str, Any]:
return round(__lowerCamelCase , 3)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Union[str, Any]:
# convolution process
_A : Tuple = convs[0]
_A : Union[str, Any] = convs[1]
_A : List[Any] = np.shape(__lowerCamelCase)[0]
# get the data slice of original image data, data_focus
_A : Tuple = []
for i_focus in range(0 , size_data - size_conv + 1 , __lowerCamelCase):
for j_focus in range(0 , size_data - size_conv + 1 , __lowerCamelCase):
_A : Optional[int] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(__lowerCamelCase)
        # calculate the feature map of every single kernel, and save it as a list of matrices
_A : Optional[Any] = []
_A : Optional[int] = int((size_data - size_conv) / conv_step + 1)
for i_map in range(__lowerCamelCase):
_A : Optional[int] = []
for i_focus in range(len(__lowerCamelCase)):
_A : Any = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map]))
- thre_convs[i_map]
)
featuremap.append(self.sig(__lowerCamelCase))
_A : Optional[Any] = np.asmatrix(__lowerCamelCase).reshape(
__lowerCamelCase , __lowerCamelCase)
data_featuremap.append(__lowerCamelCase)
        # expanding the data slice to one dimension
_A : Optional[Any] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(__lowerCamelCase))
_A : Dict = np.asarray(__lowerCamelCase)
return focus_list, data_featuremap
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase="average_pool") -> Dict:
# pooling process
_A : Optional[Any] = len(featuremaps[0])
_A : str = int(size_map / size_pooling)
_A : Optional[int] = []
for i_map in range(len(__lowerCamelCase)):
_A : int = featuremaps[i_map]
_A : Optional[int] = []
for i_focus in range(0 , __lowerCamelCase , __lowerCamelCase):
for j_focus in range(0 , __lowerCamelCase , __lowerCamelCase):
_A : str = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(__lowerCamelCase))
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(__lowerCamelCase))
_A : Tuple = np.asmatrix(__lowerCamelCase).reshape(__lowerCamelCase , __lowerCamelCase)
featuremap_pooled.append(__lowerCamelCase)
return featuremap_pooled
def _lowerCamelCase ( self , __lowerCamelCase) -> Tuple:
        # expanding three-dimensional data into a one-dimensional list
_A : Tuple = []
for i in range(len(__lowerCamelCase)):
_A : Union[str, Any] = np.shape(data[i])
_A : List[Any] = data[i].reshape(1 , shapes[0] * shapes[1])
_A : Optional[Any] = data_listed.getA().tolist()[0]
data_expanded.extend(__lowerCamelCase)
_A : Optional[Any] = np.asarray(__lowerCamelCase)
return data_expanded
def _lowerCamelCase ( self , __lowerCamelCase) -> Union[str, Any]:
        # expanding a matrix into a one-dimensional list
_A : List[Any] = np.asarray(__lowerCamelCase)
_A : Union[str, Any] = np.shape(__lowerCamelCase)
_A : Dict = data_mat.reshape(1 , shapes[0] * shapes[1])
return data_expanded
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Optional[int]:
_A : Dict = []
_A : Any = 0
for i_map in range(__lowerCamelCase):
_A : Union[str, Any] = np.ones((size_map, size_map))
for i in range(0 , __lowerCamelCase , __lowerCamelCase):
for j in range(0 , __lowerCamelCase , __lowerCamelCase):
_A : List[Any] = pd_pool[
i_pool
]
_A : Tuple = i_pool + 1
_A : Optional[Any] = np.multiply(
__lowerCamelCase , np.multiply(out_map[i_map] , (1 - out_map[i_map])))
pd_all.append(__lowerCamelCase)
return pd_all
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=bool) -> Union[str, Any]:
        # model training
print("----------------------Start Training-------------------------")
print((" - - Shape: Train_Data ", np.shape(__lowerCamelCase)))
print((" - - Shape: Teach_Data ", np.shape(__lowerCamelCase)))
_A : Tuple = 0
_A : Dict = []
_A : Optional[Any] = 1_0_0_0_0
while rp < n_repeat and mse >= error_accuracy:
_A : Union[str, Any] = 0
print(F"-------------Learning Time {rp}--------------")
for p in range(len(__lowerCamelCase)):
# print('------------Learning Image: %d--------------'%p)
_A : str = np.asmatrix(datas_train[p])
_A : Union[str, Any] = np.asarray(datas_teach[p])
_A , _A : Any = self.convolute(
__lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_A : Optional[Any] = self.pooling(__lowerCamelCase , self.size_poolinga)
_A : Optional[int] = np.shape(__lowerCamelCase)
_A : List[str] = self._expand(__lowerCamelCase)
_A : Tuple = data_bp_input
_A : int = np.dot(__lowerCamelCase , self.vji.T) - self.thre_bpa
_A : List[Any] = self.sig(__lowerCamelCase)
_A : Union[str, Any] = np.dot(__lowerCamelCase , self.wkj.T) - self.thre_bpa
_A : List[str] = self.sig(__lowerCamelCase)
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
_A : int = np.multiply(
(data_teach - bp_outa) , np.multiply(__lowerCamelCase , (1 - bp_outa)))
_A : Optional[Any] = np.multiply(
np.dot(__lowerCamelCase , self.wkj) , np.multiply(__lowerCamelCase , (1 - bp_outa)))
_A : Union[str, Any] = np.dot(__lowerCamelCase , self.vji)
_A : Any = pd_i_all / (self.size_poolinga * self.size_poolinga)
_A : Dict = pd_conva_pooled.T.getA().tolist()
_A : Optional[Any] = self._calculate_gradient_from_pool(
__lowerCamelCase , __lowerCamelCase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1]):
_A : int = self._expand_mat(pd_conva_all[k_conv])
_A : Optional[int] = self.rate_weight * np.dot(__lowerCamelCase , __lowerCamelCase)
_A : List[Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]))
_A : Any = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv]) * self.rate_thre
)
# all connected layer
_A : Tuple = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
_A : int = self.vji + pd_j_all.T * bp_outa * self.rate_weight
_A : Tuple = self.thre_bpa - pd_k_all * self.rate_thre
_A : List[str] = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
_A : Optional[int] = np.sum(abs(data_teach - bp_outa))
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
_A : Any = rp + 1
_A : Dict = error_count / patterns
all_mse.append(__lowerCamelCase)
def draw_error():
_A : Optional[int] = [error_accuracy for i in range(int(n_repeat * 1.2))]
plt.plot(__lowerCamelCase , "+-")
plt.plot(__lowerCamelCase , "r--")
plt.xlabel("Learning Times")
plt.ylabel("All_mse")
plt.grid(__lowerCamelCase , alpha=0.5)
plt.show()
print("------------------Training Complished---------------------")
print((" - - Training epoch: ", rp, F" - - Mse: {mse:.6f}"))
if draw_e:
draw_error()
return mse
def _lowerCamelCase ( self , __lowerCamelCase) -> int:
        # model prediction
_A : Union[str, Any] = []
print("-------------------Start Testing-------------------------")
print((" - - Shape: Test_Data ", np.shape(__lowerCamelCase)))
for p in range(len(__lowerCamelCase)):
_A : int = np.asmatrix(datas_test[p])
_A , _A : List[Any] = self.convolute(
__lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_A : str = self.pooling(__lowerCamelCase , self.size_poolinga)
_A : Optional[int] = self._expand(__lowerCamelCase)
_A : List[Any] = data_bp_input
_A : Optional[int] = bp_outa * self.vji.T - self.thre_bpa
_A : int = self.sig(__lowerCamelCase)
_A : int = bp_outa * self.wkj.T - self.thre_bpa
_A : Optional[int] = self.sig(__lowerCamelCase)
produce_out.extend(bp_outa.getA().tolist())
_A : int = [list(map(self.do_round , __lowerCamelCase)) for each in produce_out]
return np.asarray(__lowerCamelCase)
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
        # return the image data after the convolution process so we can inspect it
_A : Optional[int] = np.asmatrix(__lowerCamelCase)
_A , _A : Tuple = self.convolute(
__lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_A : Union[str, Any] = self.pooling(__lowerCamelCase , self.size_poolinga)
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
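
To see the pooling step above in isolation, here is a small self-contained sketch of average pooling over non-overlapping blocks (NumPy only, no class state; the block size and input are illustrative):

import numpy as np

def average_pool(feature_map, size):
    n = feature_map.shape[0] // size
    out = np.empty((n, n))
    for i in range(n):
        for j in range(n):
            out[i, j] = feature_map[i * size:(i + 1) * size, j * size:(j + 1) * size].mean()
    return out

fm = np.arange(16, dtype=float).reshape(4, 4)
print(average_pool(fm, 2))  # [[ 2.5  4.5] [10.5 12.5]]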
| 11 | 1 |
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = "sequence-classification"
def __init__( self , __lowerCamelCase) -> List[Any]:
if type(__lowerCamelCase) == dict:
_A : Tuple = Namespace(**__lowerCamelCase)
_A : Tuple = glue_output_modes[hparams.task]
_A : Union[str, Any] = glue_tasks_num_labels[hparams.task]
super().__init__(__lowerCamelCase , __lowerCamelCase , self.mode)
def _lowerCamelCase ( self , **__lowerCamelCase) -> List[str]:
return self.model(**__lowerCamelCase)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase) -> str:
_A : List[Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_A : Union[str, Any] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
_A : Union[str, Any] = self(**__lowerCamelCase)
_A : str = outputs[0]
_A : Optional[Any] = self.trainer.lr_schedulers[0]["scheduler"]
_A : Tuple = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def _lowerCamelCase ( self) -> Dict:
_A : Tuple = self.hparams
_A : Dict = processors[args.task]()
_A : str = processor.get_labels()
for mode in ["train", "dev"]:
_A : Optional[int] = self._feature_file(__lowerCamelCase)
if os.path.exists(__lowerCamelCase) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , __lowerCamelCase)
else:
logger.info("Creating features from dataset file at %s" , args.data_dir)
_A : Any = (
processor.get_dev_examples(args.data_dir)
if mode == "dev"
else processor.get_train_examples(args.data_dir)
)
_A : Union[str, Any] = convert_examples_to_features(
__lowerCamelCase , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("Saving features into cached file %s" , __lowerCamelCase)
torch.save(__lowerCamelCase , __lowerCamelCase)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = False) -> DataLoader:
_A : Dict = "dev" if mode == "test" else mode
_A : Optional[int] = self._feature_file(__lowerCamelCase)
logger.info("Loading features from cached file %s" , __lowerCamelCase)
_A : str = torch.load(__lowerCamelCase)
_A : Union[str, Any] = torch.tensor([f.input_ids for f in features] , dtype=torch.long)
_A : Dict = torch.tensor([f.attention_mask for f in features] , dtype=torch.long)
_A : Tuple = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long)
if self.hparams.glue_output_mode == "classification":
_A : Optional[Any] = torch.tensor([f.label for f in features] , dtype=torch.long)
elif self.hparams.glue_output_mode == "regression":
_A : int = torch.tensor([f.label for f in features] , dtype=torch.float)
return DataLoader(
TensorDataset(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) , batch_size=__lowerCamelCase , shuffle=__lowerCamelCase , )
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase) -> Any:
_A : Dict = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_A : Union[str, Any] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
_A : Union[str, Any] = self(**__lowerCamelCase)
_A , _A : Union[str, Any] = outputs[:2]
_A : Any = logits.detach().cpu().numpy()
_A : int = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _lowerCamelCase ( self , __lowerCamelCase) -> tuple:
_A : int = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
_A : Optional[Any] = np.concatenate([x["pred"] for x in outputs] , axis=0)
if self.hparams.glue_output_mode == "classification":
_A : int = np.argmax(__lowerCamelCase , axis=1)
elif self.hparams.glue_output_mode == "regression":
_A : Tuple = np.squeeze(__lowerCamelCase)
_A : Any = np.concatenate([x["target"] for x in outputs] , axis=0)
_A : Optional[Any] = [[] for _ in range(out_label_ids.shape[0])]
_A : Union[str, Any] = [[] for _ in range(out_label_ids.shape[0])]
_A : str = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , __lowerCamelCase , __lowerCamelCase)}
_A : List[Any] = dict(results.items())
_A : str = results
return ret, preds_list, out_label_list
def _lowerCamelCase ( self , __lowerCamelCase) -> dict:
_A , _A , _A : Tuple = self._eval_end(__lowerCamelCase)
_A : List[Any] = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _lowerCamelCase ( self , __lowerCamelCase) -> dict:
_A , _A , _A : int = self._eval_end(__lowerCamelCase)
_A : Any = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase) -> List[str]:
BaseTransformer.add_model_specific_args(__lowerCamelCase , __lowerCamelCase)
parser.add_argument(
"--max_seq_length" , default=1_2_8 , type=__lowerCamelCase , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--task" , default="" , type=__lowerCamelCase , required=__lowerCamelCase , help="The GLUE task to run" , )
parser.add_argument(
"--gpus" , default=0 , type=__lowerCamelCase , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets")
return parser
def _UpperCAmelCase ():
_A : Optional[Any] = argparse.ArgumentParser()
add_generic_args(UpperCamelCase__ , os.getcwd() )
_A : Optional[int] = GLUETransformer.add_model_specific_args(UpperCamelCase__ , os.getcwd() )
_A : List[str] = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
_A : Any = os.path.join(
"./results" , f"{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}" , )
os.makedirs(args.output_dir )
_A : Union[str, Any] = GLUETransformer(UpperCamelCase__ )
_A : Optional[int] = generic_train(UpperCamelCase__ , UpperCamelCase__ )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
_A : Optional[Any] = sorted(glob.glob(os.path.join(args.output_dir , "checkpoint-epoch=*.ckpt" ) , recursive=UpperCamelCase__ ) )
_A : Dict = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(UpperCamelCase__ )
if __name__ == "__main__":
main()
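
The logit post-processing in `_eval_end` above, isolated: GLUE classification tasks take an argmax over the label axis, while regression tasks squeeze the singleton dimension:

import numpy as np

logits = np.array([[0.1, 0.9], [0.8, 0.2]])
print(np.argmax(logits, axis=1))             # classification -> [1 0]
print(np.squeeze(np.array([[0.3], [0.7]])))  # regression -> [0.3 0.7]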
| 11 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if the regexes in qs fully match a contiguous window of strings in ks."""
    # compile regexes and force complete match
    qts = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
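
With the helpers restored above, the matching rule can be exercised directly: a rule matches when each of its components fully matches a contiguous window of the parameter path.

print(_match(("mlp", "c_fc", "kernel"), ("transformer", "h", "0", "mlp", "c_fc", "kernel")))  # True
print(_match(("mlp", "c_fc", "kernel"), ("transformer", "wte", "embedding")))                 # False
print(_match((r"ln_\d+", "scale"), ("transformer", "h", "3", "ln_2", "scale")))               # True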
| 11 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = "marian"
__SCREAMING_SNAKE_CASE = ["past_key_values"]
__SCREAMING_SNAKE_CASE = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , __lowerCamelCase=5_8_1_0_1 , __lowerCamelCase=None , __lowerCamelCase=1_0_2_4 , __lowerCamelCase=1_2 , __lowerCamelCase=4_0_9_6 , __lowerCamelCase=1_6 , __lowerCamelCase=1_2 , __lowerCamelCase=4_0_9_6 , __lowerCamelCase=1_6 , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase="gelu" , __lowerCamelCase=1_0_2_4 , __lowerCamelCase=0.1 , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.0_2 , __lowerCamelCase=5_8_1_0_0 , __lowerCamelCase=False , __lowerCamelCase=5_8_1_0_0 , __lowerCamelCase=0 , __lowerCamelCase=0 , __lowerCamelCase=True , **__lowerCamelCase , ) -> List[str]:
_A : int = vocab_size
_A : Tuple = decoder_vocab_size or vocab_size
_A : Tuple = max_position_embeddings
_A : Optional[Any] = d_model
_A : List[Any] = encoder_ffn_dim
_A : Optional[int] = encoder_layers
_A : Any = encoder_attention_heads
_A : Dict = decoder_ffn_dim
_A : Any = decoder_layers
_A : str = decoder_attention_heads
_A : Optional[Any] = dropout
_A : Optional[Any] = attention_dropout
_A : Dict = activation_dropout
_A : Any = activation_function
_A : Any = init_std
_A : str = encoder_layerdrop
_A : Tuple = decoder_layerdrop
_A : List[Any] = use_cache
_A : Optional[Any] = encoder_layers
_A : Dict = scale_embedding # scale factor will be sqrt(d_model) if True
_A : Dict = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , is_encoder_decoder=__lowerCamelCase , decoder_start_token_id=__lowerCamelCase , forced_eos_token_id=__lowerCamelCase , **__lowerCamelCase , )
class lowerCAmelCase__ ( a):
'''simple docstring'''
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def _lowerCamelCase ( self) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_A : int = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
])
if self.use_past:
_A : Tuple = {0: "batch"}
_A : int = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
_A : Dict = {0: "batch", 1: "decoder_sequence"}
_A : List[Any] = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__lowerCamelCase , direction="inputs")
elif self.task == "causal-lm":
# TODO: figure this case out.
_A : List[str] = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
])
if self.use_past:
_A , _A : int = self.num_layers
for i in range(__lowerCamelCase):
_A : List[Any] = {0: "batch", 2: "past_sequence + sequence"}
_A : str = {0: "batch", 2: "past_sequence + sequence"}
else:
_A : Optional[Any] = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
])
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def _lowerCamelCase ( self) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_A : Union[str, Any] = super().outputs
else:
_A : List[Any] = super(__lowerCamelCase , self).outputs
if self.use_past:
_A , _A : Union[str, Any] = self.num_layers
for i in range(__lowerCamelCase):
_A : List[Any] = {0: "batch", 2: "past_sequence + sequence"}
_A : Union[str, Any] = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = -1 , __lowerCamelCase = -1 , __lowerCamelCase = False , __lowerCamelCase = None , ) -> Mapping[str, Any]:
_A : Any = self._generate_dummy_inputs_for_encoder_and_decoder(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
# Generate decoder inputs
_A : Dict = seq_length if not self.use_past else 1
_A : str = self._generate_dummy_inputs_for_encoder_and_decoder(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
_A : Tuple = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
_A : List[str] = dict(**__lowerCamelCase , **__lowerCamelCase)
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
else:
import torch
_A , _A : Dict = common_inputs["input_ids"].shape
_A : Optional[int] = common_inputs["decoder_input_ids"].shape[1]
_A , _A : int = self.num_attention_heads
_A : Optional[int] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_A : Tuple = decoder_seq_length + 3
_A : Dict = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_A : Dict = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase)] , dim=1)
_A : Dict = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_A , _A : str = self.num_layers
_A : Tuple = min(__lowerCamelCase , __lowerCamelCase)
_A : Optional[Any] = max(__lowerCamelCase , __lowerCamelCase) - min_num_layers
_A : Optional[int] = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(__lowerCamelCase):
common_inputs["past_key_values"].append(
(
torch.zeros(__lowerCamelCase),
torch.zeros(__lowerCamelCase),
torch.zeros(__lowerCamelCase),
torch.zeros(__lowerCamelCase),
))
# TODO: test this.
_A : Dict = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(__lowerCamelCase , __lowerCamelCase):
common_inputs["past_key_values"].append((torch.zeros(__lowerCamelCase), torch.zeros(__lowerCamelCase)))
return common_inputs
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = -1 , __lowerCamelCase = -1 , __lowerCamelCase = False , __lowerCamelCase = None , ) -> Mapping[str, Any]:
_A : Dict = self._generate_dummy_inputs_for_encoder_and_decoder(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
else:
import torch
_A , _A : Optional[int] = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
_A : Dict = seqlen + 2
_A , _A : Any = self.num_layers
_A , _A : Dict = self.num_attention_heads
_A : List[str] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_A : Union[str, Any] = common_inputs["attention_mask"].dtype
_A : Any = torch.cat(
[common_inputs["attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase , dtype=__lowerCamelCase)] , dim=1)
_A : List[Any] = [
(torch.zeros(__lowerCamelCase), torch.zeros(__lowerCamelCase)) for _ in range(__lowerCamelCase)
]
return common_inputs
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = -1 , __lowerCamelCase = -1 , __lowerCamelCase = False , __lowerCamelCase = None , ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_A : Union[str, Any] = compute_effective_axis_dimension(
__lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0)
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_A : Optional[int] = tokenizer.num_special_tokens_to_add(__lowerCamelCase)
_A : Dict = compute_effective_axis_dimension(
__lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCamelCase)
# Generate dummy inputs according to compute batch and sequence
_A : List[Any] = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
_A : Dict = dict(tokenizer(__lowerCamelCase , return_tensors=__lowerCamelCase))
return common_inputs
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = -1 , __lowerCamelCase = -1 , __lowerCamelCase = False , __lowerCamelCase = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
_A : Union[str, Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase)
else:
_A : Any = self._generate_dummy_inputs_for_causal_lm(
__lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase)
return common_inputs
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> List[str]:
if self.task in ["default", "seq2seq-lm"]:
_A : str = super()._flatten_past_key_values_(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
else:
_A : Tuple = super(__lowerCamelCase , self)._flatten_past_key_values_(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
@property
def _lowerCamelCase ( self) -> float:
return 1e-4
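
For orientation, the dynamic-axes mapping that `inputs` above returns for the default seq2seq task with `use_past=False` looks like this (reconstructed from the branches above; illustrative):

common_inputs = {
    "input_ids": {0: "batch", 1: "encoder_sequence"},
    "attention_mask": {0: "batch", 1: "encoder_sequence"},
    "decoder_input_ids": {0: "batch", 1: "decoder_sequence"},
    "decoder_attention_mask": {0: "batch", 1: "decoder_sequence"},
}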
| 11 |
def snake_to_camel_case(input_str: str, use_pascal: bool = False):
    # name chosen here for readability; the listing left the function unnamed
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
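
Example conversions, using the descriptive name restored above:

print(snake_to_camel_case("hello_world_example"))                   # helloWorldExample
print(snake_to_camel_case("hello_world_example", use_pascal=True))  # HelloWorldExample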
| 11 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
logger = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase__ :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "Pretrained config name or path if not the same as model_name"})
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
__SCREAMING_SNAKE_CASE = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class lowerCAmelCase__ :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(default=a , metadata={"help": "The input training data file (a text file)."})
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "Overwrite the cached training and evaluation sets"})
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "The number of processes to use for the preprocessing."} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def _lowerCamelCase ( self) -> int:
if self.train_file is not None:
_A : Optional[int] = self.train_file.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
_A : Dict = self.validation_file.split(".")[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class lowerCAmelCase__ :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
def __call__( self , __lowerCamelCase) -> str:
_A : List[Any] = "label" if "label" in features[0].keys() else "labels"
_A : Any = [feature.pop(__lowerCamelCase) for feature in features]
_A : Optional[int] = len(__lowerCamelCase)
_A : int = len(features[0]["input_ids"])
_A : Tuple = [
[{k: v[i] for k, v in feature.items()} for i in range(__lowerCamelCase)] for feature in features
]
_A : str = list(chain(*__lowerCamelCase))
_A : Tuple = self.tokenizer.pad(
__lowerCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
# Un-flatten
_A : Optional[int] = {k: v.view(__lowerCamelCase , __lowerCamelCase , -1) for k, v in batch.items()}
# Add back labels
_A : Optional[int] = torch.tensor(__lowerCamelCase , dtype=torch.intaa)
return batch
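
In miniature, the collator above flattens the [batch][num_choices] feature dicts into batch * num_choices rows so the tokenizer can pad them, then restores the choice axis; the shapes below are illustrative:

import torch

batch, num_choices, seq_len = 2, 4, 8
flat = torch.zeros(batch * num_choices, seq_len)  # padded, flattened rows
unflat = flat.view(batch, num_choices, -1)        # restore the choice axis
print(unflat.shape)                               # torch.Size([2, 4, 8])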
def _UpperCAmelCase ():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_A : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_A , _A , _A : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_A , _A , _A : Union[str, Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_swag" , UpperCamelCase__ , UpperCamelCase__ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_A : int = training_args.get_process_log_level()
logger.setLevel(UpperCamelCase__ )
datasets.utils.logging.set_verbosity(UpperCamelCase__ )
transformers.utils.logging.set_verbosity(UpperCamelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
_A : List[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_A : Optional[int] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
_A : List[str] = {}
if data_args.train_file is not None:
_A : Optional[int] = data_args.train_file
if data_args.validation_file is not None:
_A : Tuple = data_args.validation_file
_A : Union[str, Any] = data_args.train_file.split("." )[-1]
_A : List[str] = load_dataset(
UpperCamelCase__ , data_files=UpperCamelCase__ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
_A : Union[str, Any] = load_dataset(
"swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_A : Optional[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_A : Optional[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_A : List[Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
_A : str = [f"ending{i}" for i in range(4 )]
_A : Union[str, Any] = "sent1"
_A : str = "sent2"
if data_args.max_seq_length is None:
_A : Any = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`." )
_A : Optional[Any] = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
_A : int = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(UpperCamelCase__ : List[Any] ):
_A : List[Any] = [[context] * 4 for context in examples[context_name]]
_A : Any = examples[question_header_name]
_A : Union[str, Any] = [
[f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(UpperCamelCase__ )
]
# Flatten out
_A : Dict = list(chain(*UpperCamelCase__ ) )
_A : List[Any] = list(chain(*UpperCamelCase__ ) )
# Tokenize
_A : str = tokenizer(
UpperCamelCase__ , UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding="max_length" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(UpperCamelCase__ ) , 4 )] for k, v in tokenized_examples.items()}
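    # Illustration (added note): each SWAG example expands into 4
    # (context, context + candidate ending) pairs; after tokenizing the
    # flattened lists, the dict comprehension above regroups every block of 4
    # consecutive encodings back into one example, so tokenized["input_ids"][0]
    # holds the 4 candidate encodings for example 0.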
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
_A : Optional[int] = raw_datasets["train"]
if data_args.max_train_samples is not None:
_A : Union[str, Any] = min(len(UpperCamelCase__ ) , data_args.max_train_samples )
_A : Any = train_dataset.select(range(UpperCamelCase__ ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
_A : Optional[int] = train_dataset.map(
UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
_A : Optional[int] = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
_A : str = min(len(UpperCamelCase__ ) , data_args.max_eval_samples )
_A : Dict = eval_dataset.select(range(UpperCamelCase__ ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
_A : List[str] = eval_dataset.map(
UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
_A : str = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=UpperCamelCase__ , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(UpperCamelCase__ : Tuple ):
_A , _A : List[str] = eval_predictions
_A : Optional[int] = np.argmax(UpperCamelCase__ , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
_A : List[str] = Trainer(
model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=UpperCamelCase__ , data_collator=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , )
# Training
if training_args.do_train:
_A : Any = None
if training_args.resume_from_checkpoint is not None:
_A : Optional[int] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_A : int = last_checkpoint
_A : Any = trainer.train(resume_from_checkpoint=UpperCamelCase__ )
trainer.save_model() # Saves the tokenizer too for easy upload
_A : Optional[int] = train_result.metrics
_A : Tuple = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCamelCase__ )
)
_A : Tuple = min(UpperCamelCase__ , len(UpperCamelCase__ ) )
trainer.log_metrics("train" , UpperCamelCase__ )
trainer.save_metrics("train" , UpperCamelCase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_A : List[Any] = trainer.evaluate()
_A : int = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(UpperCamelCase__ )
_A : Optional[Any] = min(UpperCamelCase__ , len(UpperCamelCase__ ) )
trainer.log_metrics("eval" , UpperCamelCase__ )
trainer.save_metrics("eval" , UpperCamelCase__ )
_A : Tuple = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "multiple-choice",
"dataset_tags": "swag",
"dataset_args": "regular",
"dataset": "SWAG",
"language": "en",
}
if training_args.push_to_hub:
trainer.push_to_hub(**UpperCamelCase__ )
else:
trainer.create_model_card(**UpperCamelCase__ )
def _UpperCAmelCase (UpperCamelCase__ : Optional[int] ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 11 |
from __future__ import annotations
def _UpperCAmelCase (value : list[int] , weight : list[int] , capacity : int ):
    # Greedy fractional knapsack: take items in decreasing value/weight order.
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value , weight )]
    index.sort(key=lambda i: ratio[i] , reverse=True )
    max_value: float = 0
    fractions: list[float] = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            # the whole item fits: take all of it
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            # take the largest fraction that still fits, then stop
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
return max_value, fractions
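# Worked example (illustrative numbers, not from the original file):
# values = [60, 100, 120], weights = [10, 20, 30], capacity = 50.
# Value/weight ratios are [6.0, 5.0, 4.0], so the greedy order is 0, 1, 2.
# Items 0 and 1 fit whole (remaining capacity: 50 - 10 - 20 = 20) and item 2
# is taken at fraction 20/30, adding 120 * 20 / 30 = 80.
# Expected result: max_value = 240.0, fractions = [1, 1, 2/3].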
if __name__ == "__main__":
import doctest
doctest.testmod()
| 11 | 1 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = "Speech2TextFeatureExtractor"
__SCREAMING_SNAKE_CASE = "Speech2TextTokenizer"
def __init__( self , __lowerCamelCase , __lowerCamelCase) -> int:
super().__init__(__lowerCamelCase , __lowerCamelCase)
_A : Any = self.feature_extractor
_A : int = False
def __call__( self , *__lowerCamelCase , **__lowerCamelCase) -> Union[str, Any]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__lowerCamelCase , **__lowerCamelCase)
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
_A : Optional[int] = kwargs.pop("raw_speech")
else:
_A : Optional[int] = kwargs.pop("audio" , __lowerCamelCase)
_A : Optional[Any] = kwargs.pop("sampling_rate" , __lowerCamelCase)
_A : List[Any] = kwargs.pop("text" , __lowerCamelCase)
if len(__lowerCamelCase) > 0:
_A : int = args[0]
_A : Tuple = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process.")
if audio is not None:
_A : int = self.feature_extractor(__lowerCamelCase , *__lowerCamelCase , sampling_rate=__lowerCamelCase , **__lowerCamelCase)
if text is not None:
_A : int = self.tokenizer(__lowerCamelCase , **__lowerCamelCase)
if text is None:
return inputs
elif audio is None:
return encodings
else:
_A : Tuple = encodings["input_ids"]
return inputs
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> Any:
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> Tuple:
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase)
@contextmanager
def _lowerCamelCase ( self) -> str:
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call.")
_A : Optional[int] = True
_A : str = self.tokenizer
yield
_A : Union[str, Any] = self.feature_extractor
_A : List[Any] = False
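# Usage sketch (illustrative; the checkpoint name below is an assumption, not
# taken from this file):
#     processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
#     inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
#     labels = processor(text="a transcript").input_ids
# Audio is routed to the feature extractor and text to the tokenizer; when both
# are passed in one call, the tokenized text is attached to the audio features
# as "labels".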
| 11 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( a):
'''simple docstring'''
def __init__( self , *__lowerCamelCase , **__lowerCamelCase) -> None:
warnings.warn(
"The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use BeitImageProcessor instead." , __lowerCamelCase , )
super().__init__(*__lowerCamelCase , **__lowerCamelCase)
| 11 | 1 |
import heapq
import sys
import numpy as np
lowerCAmelCase__ = tuple[int, int]
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self) -> Optional[int]:
_A : List[str] = []
_A : Optional[int] = set()
def _lowerCamelCase ( self) -> Any:
if not self.empty():
return self.elements[0][0]
else:
return float("inf")
def _lowerCamelCase ( self) -> List[str]:
return len(self.elements) == 0
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase) -> List[Any]:
if item not in self.set:
heapq.heappush(self.elements , (priority, item))
self.set.add(__lowerCamelCase)
else:
# update
# print("update", item)
_A : Optional[Any] = []
((_A) , (_A)) : Dict = heapq.heappop(self.elements)
while x != item:
temp.append((pri, x))
((_A) , (_A)) : Optional[Any] = heapq.heappop(self.elements)
temp.append((priority, item))
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx))
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
if item in self.set:
self.set.remove(__lowerCamelCase)
_A : Any = []
((_A) , (_A)) : List[Any] = heapq.heappop(self.elements)
while x != item:
temp.append((pro, x))
((_A) , (_A)) : Union[str, Any] = heapq.heappop(self.elements)
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy))
def _lowerCamelCase ( self) -> Any:
return self.elements[0][1]
def _lowerCamelCase ( self) -> Optional[Any]:
((_A) , (_A)) : Any = heapq.heappop(self.elements)
self.set.remove(__lowerCamelCase)
return (priority, item)
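# Note (added illustration): heapq has no decrease-key operation, so put() and
# remove_element() above update an entry by popping items into a temporary
# list until the target is found and then pushing everything back. That is
# O(n log n) per update, which is acceptable at this grid size.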
def _UpperCAmelCase (UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ):
# euclidean distance
_A : Optional[Any] = np.array(UpperCamelCase__ )
_A : str = np.array(UpperCamelCase__ )
return np.linalg.norm(a - b )
def _UpperCAmelCase (UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ):
# integer division by time variable
return consistent_heuristic(UpperCamelCase__ , UpperCamelCase__ ) // t
def _UpperCAmelCase (UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ):
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def _UpperCAmelCase (UpperCamelCase__ : TPos , UpperCamelCase__ : int , UpperCamelCase__ : TPos , UpperCamelCase__ : dict[TPos, float] ):
_A : str = g_function[start] + Wa * heuristics[i](UpperCamelCase__ , UpperCamelCase__ )
return ans
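# Note (added illustration): this is the usual weighted-A* priority,
#     key(s, i) = g(s) + Wa * h_i(s).
# With Wa = 1 it reduces to plain A*; Wa > 1 inflates the heuristic, making
# the search greedier and trading optimality for speed.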
def _UpperCAmelCase (UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str ):
_A : List[Any] = np.chararray((n, n) )
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
_A : Optional[int] = "*"
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
if (j, (n - 1) - i) in blocks:
_A : Dict = "#"
_A : Optional[int] = "-"
_A : Union[str, Any] = back_pointer[goal]
while x != start:
((_A) , (_A)) : Optional[Any] = x
# print(x)
_A : Union[str, Any] = "-"
_A : List[Any] = back_pointer[x]
_A : int = "-"
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=" " )
print("<-- End position" , end=" " )
else:
print(grid[i][j] , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
print("PATH TAKEN BY THE ALGORITHM IS:-" )
_A : Optional[Any] = back_pointer[goal]
while x != start:
print(UpperCamelCase__ , end=" " )
_A : List[Any] = back_pointer[x]
print(UpperCamelCase__ )
sys.exit()
def _UpperCAmelCase (UpperCamelCase__ : TPos ):
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Any , ):
for itera in range(UpperCamelCase__ ):
open_list[itera].remove_element(UpperCamelCase__ )
# print("s", s)
# print("j", j)
((_A) , (_A)) : int = s
_A : Dict = (x - 1, y)
_A : Any = (x + 1, y)
_A : Dict = (x, y + 1)
_A : Union[str, Any] = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(UpperCamelCase__ ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(UpperCamelCase__ )
_A : Dict = -1
_A : List[str] = float("inf" )
if valid(UpperCamelCase__ ) and g_function[neighbours] > g_function[s] + 1:
_A : Union[str, Any] = g_function[s] + 1
_A : int = s
if neighbours not in close_list_anchor:
open_list[0].put(UpperCamelCase__ , key(UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ ) )
if neighbours not in close_list_inad:
for var in range(1 , UpperCamelCase__ ):
if key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) <= Wa * key(
UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ ):
open_list[j].put(
UpperCamelCase__ , key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) )
def _UpperCAmelCase ():
_A : Union[str, Any] = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
lowerCAmelCase__ = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
lowerCAmelCase__ = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
lowerCAmelCase__ = make_common_ground()
lowerCAmelCase__ = blocks_blk
# hyper parameters
lowerCAmelCase__ = 1
lowerCAmelCase__ = 1
lowerCAmelCase__ = 20
lowerCAmelCase__ = 3 # one consistent and two other inconsistent
# start and end destination
lowerCAmelCase__ = (0, 0)
lowerCAmelCase__ = (n - 1, n - 1)
lowerCAmelCase__ = 1
def _UpperCAmelCase (UpperCamelCase__ : TPos , UpperCamelCase__ : TPos , UpperCamelCase__ : int ):
_A : Optional[int] = {start: 0, goal: float("inf" )}
_A : int = {start: -1, goal: -1}
_A : Dict = []
_A : Dict = set()
for i in range(UpperCamelCase__ ):
open_list.append(PriorityQueue() )
open_list[i].put(UpperCamelCase__ , key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) )
_A : list[int] = []
_A : list[int] = []
while open_list[0].minkey() < float("inf" ):
for i in range(1 , UpperCamelCase__ ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float("inf" ):
do_something(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
_A , _A : Tuple = open_list[i].top_show()
visited.add(UpperCamelCase__ )
expand_state(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , )
close_list_inad.append(UpperCamelCase__ )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float("inf" ):
do_something(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
_A : Dict = open_list[0].top_show()
visited.add(UpperCamelCase__ )
expand_state(
UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , )
close_list_anchor.append(UpperCamelCase__ )
print("No path found to goal" )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(UpperCamelCase__ ):
if (j, i) in blocks:
print("#" , end=" " )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("*" , end=" " )
else:
print("-" , end=" " )
else:
print("*" , end=" " )
if (j, i) == (n - 1, n - 1):
print("<-- End position" , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 11 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=3_2 , __lowerCamelCase=2 , __lowerCamelCase=3 , __lowerCamelCase=1_6 , __lowerCamelCase=[1, 2, 1] , __lowerCamelCase=[2, 2, 4] , __lowerCamelCase=2 , __lowerCamelCase=2.0 , __lowerCamelCase=True , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.1 , __lowerCamelCase="gelu" , __lowerCamelCase=False , __lowerCamelCase=True , __lowerCamelCase=0.0_2 , __lowerCamelCase=1e-5 , __lowerCamelCase=True , __lowerCamelCase=None , __lowerCamelCase=True , __lowerCamelCase=1_0 , __lowerCamelCase=8 , __lowerCamelCase=["stage1", "stage2", "stage3"] , __lowerCamelCase=[1, 2, 3] , ) -> Optional[Any]:
_A : int = parent
_A : Optional[Any] = batch_size
_A : str = image_size
_A : Tuple = patch_size
_A : Tuple = num_channels
_A : Optional[int] = embed_dim
_A : Dict = depths
_A : Any = num_heads
_A : Any = window_size
_A : int = mlp_ratio
_A : Any = qkv_bias
_A : Union[str, Any] = hidden_dropout_prob
_A : Optional[Any] = attention_probs_dropout_prob
_A : Dict = drop_path_rate
_A : List[Any] = hidden_act
_A : Any = use_absolute_embeddings
_A : Optional[int] = patch_norm
_A : Tuple = layer_norm_eps
_A : List[str] = initializer_range
_A : Optional[int] = is_training
_A : Optional[Any] = scope
_A : Optional[int] = use_labels
_A : Dict = type_sequence_label_size
_A : str = encoder_stride
_A : Optional[int] = out_features
_A : Optional[int] = out_indices
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_A : Optional[Any] = None
if self.use_labels:
_A : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_A : Optional[int] = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self) -> Union[str, Any]:
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> List[Any]:
_A : Dict = MaskFormerSwinModel(config=__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
_A : int = model(__lowerCamelCase)
_A : Any = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
_A : List[str] = int(config.embed_dim * 2 ** (len(config.depths) - 1))
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim))
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Dict:
_A : Optional[Any] = MaskFormerSwinBackbone(config=__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
_A : Dict = model(__lowerCamelCase)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [1_3, 1_6, 1_6, 1_6])
# verify channels
self.parent.assertEqual(len(model.channels) , len(config.out_features))
self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4])
# verify ValueError
with self.parent.assertRaises(__lowerCamelCase):
_A : Union[str, Any] = ["stem"]
_A : Union[str, Any] = MaskFormerSwinBackbone(config=__lowerCamelCase)
def _lowerCamelCase ( self) -> Dict:
_A : Any = self.prepare_config_and_inputs()
_A , _A , _A : List[Any] = config_and_inputs
_A : Optional[int] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( a , a , unittest.TestCase):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def _lowerCamelCase ( self) -> str:
_A : Union[str, Any] = MaskFormerSwinModelTester(self)
_A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , embed_dim=3_7)
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
" `nn.DataParallel`"
))
def _lowerCamelCase ( self) -> Union[str, Any]:
pass
def _lowerCamelCase ( self) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self) -> str:
return
def _lowerCamelCase ( self) -> List[Any]:
_A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase)
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__lowerCamelCase)
@unittest.skip("Swin does not use inputs_embeds")
def _lowerCamelCase ( self) -> str:
pass
@unittest.skip("Swin does not support feedforward chunking")
def _lowerCamelCase ( self) -> List[Any]:
pass
def _lowerCamelCase ( self) -> Optional[int]:
_A , _A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Union[str, Any] = model_class(__lowerCamelCase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
_A : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear))
def _lowerCamelCase ( self) -> Any:
_A , _A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : int = model_class(__lowerCamelCase)
_A : Optional[int] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : int = [*signature.parameters.keys()]
_A : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase)
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
def _lowerCamelCase ( self) -> Tuple:
pass
@unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
def _lowerCamelCase ( self) -> str:
pass
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Optional[int]:
_A : Any = model_class(__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
with torch.no_grad():
_A : str = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase))
_A : Tuple = outputs.hidden_states
_A : Any = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths) + 1)
self.assertEqual(len(__lowerCamelCase) , __lowerCamelCase)
# Swin has a different seq_length
_A : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
_A : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
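        # Illustration (added note): with the tester defaults (image_size=32,
        # patch_size=2, embed_dim=16) this asserts a first-stage hidden state
        # of shape (batch, (32 // 2) * (32 // 2), 16) = (batch, 256, 16).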
def _lowerCamelCase ( self) -> Dict:
_A , _A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_A : Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_A : List[Any] = True
self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A : Optional[int] = True
self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
def _lowerCamelCase ( self) -> Tuple:
_A , _A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_A : Optional[int] = 3
_A : Dict = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
_A : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
_A : int = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_A : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_A : List[Any] = True
self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , (padded_height, padded_width))
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A : Union[str, Any] = True
self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , (padded_height, padded_width))
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
def _lowerCamelCase ( self) -> List[str]:
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
def _lowerCamelCase ( self) -> List[str]:
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
def _lowerCamelCase ( self) -> str:
pass
def _lowerCamelCase ( self) -> Optional[Any]:
_A , _A : Any = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(__lowerCamelCase):
_A : Optional[int] = 0
return t
def check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase={}):
with torch.no_grad():
_A : Any = model(**__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase)
_A : int = model(**__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase).to_tuple()
def recursive_check(__lowerCamelCase , __lowerCamelCase):
if isinstance(__lowerCamelCase , (List, Tuple)):
for tuple_iterable_value, dict_iterable_value in zip(__lowerCamelCase , __lowerCamelCase):
recursive_check(__lowerCamelCase , __lowerCamelCase)
elif isinstance(__lowerCamelCase , __lowerCamelCase):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values()):
recursive_check(__lowerCamelCase , __lowerCamelCase)
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(__lowerCamelCase) , set_nan_tensor_to_zero(__lowerCamelCase) , atol=1e-5) , msg=(
"Tuple and dict output are not equal. Difference:"
F" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
F" {torch.isnan(__lowerCamelCase).any()} and `inf`: {torch.isinf(__lowerCamelCase)}. Dict has"
F" `nan`: {torch.isnan(__lowerCamelCase).any()} and `inf`: {torch.isinf(__lowerCamelCase)}."
) , )
recursive_check(__lowerCamelCase , __lowerCamelCase)
for model_class in self.all_model_classes:
_A : List[Any] = model_class(__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
_A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)
_A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
_A : Any = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase)
_A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase)
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
_A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)
_A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True})
_A : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase)
_A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase)
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True})
@require_torch
class lowerCAmelCase__ ( unittest.TestCase , a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (MaskFormerSwinBackbone,) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE = MaskFormerSwinConfig
def _lowerCamelCase ( self) -> Optional[Any]:
_A : Tuple = MaskFormerSwinModelTester(self)
def _lowerCamelCase ( self) -> Optional[Any]:
_A , _A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_A : Union[str, Any] = inputs_dict["pixel_values"].shape[0]
for backbone_class in self.all_model_classes:
_A : Optional[Any] = backbone_class(__lowerCamelCase)
backbone.to(__lowerCamelCase)
backbone.eval()
_A : List[Any] = backbone(**__lowerCamelCase)
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , __lowerCamelCase)
self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels))
self.assertIsNone(outputs.hidden_states)
self.assertIsNone(outputs.attentions)
# Test output_hidden_states=True
_A : List[str] = backbone(**__lowerCamelCase , output_hidden_states=__lowerCamelCase)
self.assertIsNotNone(outputs.hidden_states)
self.assertTrue(len(outputs.hidden_states) , len(backbone.stage_names))
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
_A , _A , _A : List[str] = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels))
# Test output_attentions=True
if self.has_attentions:
_A : int = backbone(**__lowerCamelCase , output_attentions=__lowerCamelCase)
self.assertIsNotNone(outputs.attentions)
| 11 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'xlm-mlm-en-2048': 'https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json',
'xlm-mlm-ende-1024': 'https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json',
'xlm-mlm-enfr-1024': 'https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json',
'xlm-mlm-enro-1024': 'https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json',
'xlm-mlm-tlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json',
'xlm-mlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json',
'xlm-clm-enfr-1024': 'https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json',
'xlm-clm-ende-1024': 'https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json',
'xlm-mlm-17-1280': 'https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json',
'xlm-mlm-100-1280': 'https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json',
}
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = "xlm"
__SCREAMING_SNAKE_CASE = {
"hidden_size": "emb_dim",
"num_attention_heads": "n_heads",
"num_hidden_layers": "n_layers",
"n_words": "vocab_size", # For backward compatibility
}
def __init__( self , __lowerCamelCase=3_0_1_4_5 , __lowerCamelCase=2_0_4_8 , __lowerCamelCase=1_2 , __lowerCamelCase=1_6 , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=True , __lowerCamelCase=False , __lowerCamelCase=False , __lowerCamelCase=False , __lowerCamelCase=1 , __lowerCamelCase=True , __lowerCamelCase=5_1_2 , __lowerCamelCase=2_0_4_8**-0.5 , __lowerCamelCase=1e-12 , __lowerCamelCase=0.0_2 , __lowerCamelCase=0 , __lowerCamelCase=1 , __lowerCamelCase=2 , __lowerCamelCase=3 , __lowerCamelCase=5 , __lowerCamelCase=True , __lowerCamelCase="first" , __lowerCamelCase=True , __lowerCamelCase=None , __lowerCamelCase=True , __lowerCamelCase=0.1 , __lowerCamelCase=5 , __lowerCamelCase=5 , __lowerCamelCase=0 , __lowerCamelCase=0 , __lowerCamelCase=2 , __lowerCamelCase=0 , **__lowerCamelCase , ) -> Optional[int]:
_A : Optional[Any] = vocab_size
_A : Optional[Any] = emb_dim
_A : Union[str, Any] = n_layers
_A : int = n_heads
_A : Union[str, Any] = dropout
_A : List[str] = attention_dropout
_A : Tuple = gelu_activation
_A : Dict = sinusoidal_embeddings
_A : int = causal
_A : int = asm
_A : int = n_langs
_A : int = use_lang_emb
_A : Union[str, Any] = layer_norm_eps
_A : Union[str, Any] = bos_index
_A : Union[str, Any] = eos_index
_A : Tuple = pad_index
_A : Any = unk_index
_A : Dict = mask_index
_A : str = is_encoder
_A : Union[str, Any] = max_position_embeddings
_A : Optional[int] = embed_init_std
_A : List[str] = init_std
_A : Optional[int] = summary_type
_A : Optional[Any] = summary_use_proj
_A : Dict = summary_activation
_A : Optional[Any] = summary_proj_to_labels
_A : Any = summary_first_dropout
_A : str = start_n_top
_A : Any = end_n_top
_A : int = mask_token_id
_A : List[Any] = lang_id
if "n_words" in kwargs:
_A : Any = kwargs["n_words"]
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , **__lowerCamelCase)
class lowerCAmelCase__ ( a):
'''simple docstring'''
@property
def _lowerCamelCase ( self) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_A : List[str] = {0: "batch", 1: "choice", 2: "sequence"}
else:
_A : List[Any] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
])
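# Illustration (added note): for the default task the property above resolves to
#     {"input_ids": {0: "batch", 1: "sequence"},
#      "attention_mask": {0: "batch", 1: "sequence"},
#      "token_type_ids": {0: "batch", 1: "sequence"}}
# i.e. both axes are declared dynamic, so the exported ONNX graph accepts
# variable batch sizes and sequence lengths.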
| 11 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
lowerCAmelCase__ = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['SpeechT5Tokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
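# Note (added illustration): outside of type checking, sys.modules[__name__] is
# replaced by this _LazyModule, so a symbol such as the SpeechT5 processor is
# only imported on first attribute access; the TYPE_CHECKING branch above
# exists so that static analyzers still see the real imports.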
| 11 | 1 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"]):
_A : Any = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(__lowerCamelCase)
def _lowerCamelCase ( self) -> Tuple:
_A : List[str] = "sshleifer/tiny-gpt2"
_A : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__lowerCamelCase , multi_process=__lowerCamelCase , )
_A : Any = TensorFlowBenchmark(__lowerCamelCase)
_A : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> Tuple:
_A : Union[str, Any] = "sgugger/tiny-distilbert-classification"
_A : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , only_pretrain_model=__lowerCamelCase , )
_A : List[Any] = TensorFlowBenchmark(__lowerCamelCase)
_A : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : Any = "sshleifer/tiny-gpt2"
_A : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Union[str, Any] = TensorFlowBenchmark(__lowerCamelCase)
_A : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> Optional[int]:
_A : Optional[Any] = "sshleifer/tiny-gpt2"
_A : str = AutoConfig.from_pretrained(__lowerCamelCase)
_A : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__lowerCamelCase , multi_process=__lowerCamelCase , )
_A : Union[str, Any] = TensorFlowBenchmark(__lowerCamelCase , [config])
_A : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : Any = "sshleifer/tiny-gpt2"
_A : List[Any] = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Dict = TensorFlowBenchmark(__lowerCamelCase , [config])
_A : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> List[str]:
_A : str = "sshleifer/tiny-gpt2"
_A : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Union[str, Any] = TensorFlowBenchmark(__lowerCamelCase)
_A : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def _lowerCamelCase ( self) -> List[str]:
_A : str = "sshleifer/tiny-gpt2"
_A : Optional[int] = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Union[str, Any] = TensorFlowBenchmark(__lowerCamelCase , [config])
_A : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : Optional[Any] = "patrickvonplaten/t5-tiny-random"
_A : Optional[Any] = AutoConfig.from_pretrained(__lowerCamelCase)
_A : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : List[str] = TensorFlowBenchmark(__lowerCamelCase , configs=[config])
_A : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0 , "Cannot do xla on CPU.")
def _lowerCamelCase ( self) -> str:
_A : Dict = "sshleifer/tiny-gpt2"
_A : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , use_xla=__lowerCamelCase , multi_process=__lowerCamelCase , )
_A : List[Any] = TensorFlowBenchmark(__lowerCamelCase)
_A : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> Optional[int]:
_A : Dict = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
_A : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__lowerCamelCase , save_to_csv=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__lowerCamelCase , "inf_time.csv") , inference_memory_csv_file=os.path.join(__lowerCamelCase , "inf_mem.csv") , env_info_csv_file=os.path.join(__lowerCamelCase , "env.csv") , multi_process=__lowerCamelCase , )
_A : Union[str, Any] = TensorFlowBenchmark(__lowerCamelCase)
benchmark.run()
self.assertTrue(Path(os.path.join(__lowerCamelCase , "inf_time.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "inf_mem.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "env.csv")).exists())
def _lowerCamelCase ( self) -> Tuple:
_A : Optional[Any] = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(__lowerCamelCase):
self.assertTrue(hasattr(__lowerCamelCase , "sequential"))
self.assertTrue(hasattr(__lowerCamelCase , "cumulative"))
self.assertTrue(hasattr(__lowerCamelCase , "current"))
self.assertTrue(hasattr(__lowerCamelCase , "total"))
with tempfile.TemporaryDirectory() as tmp_dir:
_A : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__lowerCamelCase , "log.txt") , log_print=__lowerCamelCase , trace_memory_line_by_line=__lowerCamelCase , eager_mode=__lowerCamelCase , multi_process=__lowerCamelCase , )
_A : Optional[int] = TensorFlowBenchmark(__lowerCamelCase)
_A : Union[str, Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary)
self.assertTrue(Path(os.path.join(__lowerCamelCase , "log.txt")).exists())
| 11 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def _UpperCAmelCase (UpperCamelCase__ : Union[str, Any] ):
_A , _A : Any = image.size
_A , _A : str = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
_A : List[str] = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] )
_A : Any = np.array(UpperCamelCase__ ).astype(np.floataa ) / 2_55.0
_A : Optional[Any] = image[None].transpose(0 , 3 , 1 , 2 )
_A : Union[str, Any] = torch.from_numpy(UpperCamelCase__ )
return 2.0 * image - 1.0
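# Note (added illustration): the final line maps pixel values from [0, 255] to
# the [-1, 1] range the diffusion model expects: 2.0 * (x / 255) - 1.0 sends
# 0 -> -1.0, 127.5 -> 0.0 and 255 -> 1.0.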
class lowerCAmelCase__ ( a):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> Optional[int]:
super().__init__()
self.register_modules(vqvae=__lowerCamelCase , unet=__lowerCamelCase , scheduler=__lowerCamelCase)
@torch.no_grad()
def __call__( self , __lowerCamelCase = None , __lowerCamelCase = 1 , __lowerCamelCase = 1_0_0 , __lowerCamelCase = 0.0 , __lowerCamelCase = None , __lowerCamelCase = "pil" , __lowerCamelCase = True , ) -> Union[Tuple, ImagePipelineOutput]:
if isinstance(__lowerCamelCase , PIL.Image.Image):
_A : Tuple = 1
elif isinstance(__lowerCamelCase , torch.Tensor):
_A : Union[str, Any] = image.shape[0]
else:
raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(__lowerCamelCase)}")
if isinstance(__lowerCamelCase , PIL.Image.Image):
_A : Union[str, Any] = preprocess(__lowerCamelCase)
_A , _A : Union[str, Any] = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
_A : Optional[Any] = (batch_size, self.unet.config.in_channels // 2, height, width)
_A : str = next(self.unet.parameters()).dtype
_A : Union[str, Any] = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=self.device , dtype=__lowerCamelCase)
_A : List[Any] = image.to(device=self.device , dtype=__lowerCamelCase)
# set timesteps and move to the correct device
self.scheduler.set_timesteps(__lowerCamelCase , device=self.device)
_A : Any = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
_A : List[str] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_A : str = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
_A : Optional[int] = {}
if accepts_eta:
_A : List[Any] = eta
for t in self.progress_bar(__lowerCamelCase):
# concat latents and low resolution image in the channel dimension.
_A : List[Any] = torch.cat([latents, image] , dim=1)
_A : str = self.scheduler.scale_model_input(__lowerCamelCase , __lowerCamelCase)
# predict the noise residual
_A : Any = self.unet(__lowerCamelCase , __lowerCamelCase).sample
# compute the previous noisy sample x_t -> x_t-1
_A : Optional[int] = self.scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase).prev_sample
# decode the image latents with the VQVAE
_A : Union[str, Any] = self.vqvae.decode(__lowerCamelCase).sample
_A : Dict = torch.clamp(__lowerCamelCase , -1.0 , 1.0)
_A : Tuple = image / 2 + 0.5
_A : int = image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
_A : Optional[int] = self.numpy_to_pil(__lowerCamelCase)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowerCamelCase)
| 11 | 1 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
lowerCAmelCase__ = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
lowerCAmelCase__ = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
lowerCAmelCase__ = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n \'num_edits\' (int): The cumulative number of edits\n \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n Example 2:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n Example 3:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n Example 4:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n Example 5:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class lowerCAmelCase__ ( datasets.Metric):
'''simple docstring'''
def _lowerCamelCase ( self) -> str:
if version.parse(scb.__version__) < version.parse("1.4.12"):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`.")
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence"),
"references": datasets.Sequence(datasets.Value("string" , id="sequence") , id="references"),
}) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[
"https://github.com/jhclark/tercom",
] , )
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = False , ) -> int:
_A : Optional[int] = len(references[0])
if any(len(__lowerCamelCase) != references_per_prediction for refs in references):
raise ValueError("Sacrebleu requires the same number of references for each prediction")
_A : int = [[refs[i] for refs in references] for i in range(__lowerCamelCase)]
_A : Tuple = TER(
normalized=__lowerCamelCase , no_punct=__lowerCamelCase , asian_support=__lowerCamelCase , case_sensitive=__lowerCamelCase , )
_A : str = sb_ter.corpus_score(__lowerCamelCase , __lowerCamelCase)
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 11 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( a , a , unittest.TestCase):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VQModel
__SCREAMING_SNAKE_CASE = "sample"
@property
def _lowerCamelCase ( self , __lowerCamelCase=(3_2, 3_2)) -> Optional[Any]:
_A : Optional[int] = 4
_A : Tuple = 3
_A : List[Any] = floats_tensor((batch_size, num_channels) + sizes).to(__lowerCamelCase)
return {"sample": image}
@property
def _lowerCamelCase ( self) -> int:
return (3, 3_2, 3_2)
@property
def _lowerCamelCase ( self) -> List[Any]:
return (3, 3_2, 3_2)
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : List[Any] = {
"block_out_channels": [3_2, 6_4],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 3,
}
_A : int = self.dummy_input
return init_dict, inputs_dict
def _lowerCamelCase ( self) -> Union[str, Any]:
pass
def _lowerCamelCase ( self) -> Any:
pass
def _lowerCamelCase ( self) -> Any:
_A , _A : List[Any] = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=__lowerCamelCase)
self.assertIsNotNone(__lowerCamelCase)
self.assertEqual(len(loading_info["missing_keys"]) , 0)
model.to(__lowerCamelCase)
_A : str = model(**self.dummy_input)
assert image is not None, "Make sure output is not None"
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : Optional[Any] = VQModel.from_pretrained("fusing/vqgan-dummy")
model.to(__lowerCamelCase).eval()
torch.manual_seed(0)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0)
_A : Tuple = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size)
_A : Optional[int] = image.to(__lowerCamelCase)
with torch.no_grad():
_A : List[str] = model(__lowerCamelCase).sample
_A : int = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
_A : Optional[Any] = torch.tensor([-0.0_1_5_3, -0.4_0_4_4, -0.1_8_8_0, -0.5_1_6_1, -0.2_4_1_8, -0.4_0_7_2, -0.1_6_1_2, -0.0_6_3_3, -0.0_1_4_3])
# fmt: on
self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3))
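# A standalone sketch of what the integration tests above exercise: load the
# tiny public checkpoint used by the tests, run one deterministic forward pass
# and inspect the reconstruction (CPU execution is assumed for simplicity).
import torch
from diffusers import VQModel

sketch_model = VQModel.from_pretrained("fusing/vqgan-dummy").eval()
torch.manual_seed(0)
sketch_image = torch.randn(1, sketch_model.config.in_channels, sketch_model.config.sample_size, sketch_model.config.sample_size)
with torch.no_grad():
    reconstruction = sketch_model(sketch_image).sample
print(reconstruction.shape)  # same shape as the input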
| 11 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase__ = {
'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'],
'tokenization_electra': ['ElectraTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['ElectraTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'ElectraForCausalLM',
'ElectraForMaskedLM',
'ElectraForMultipleChoice',
'ElectraForPreTraining',
'ElectraForQuestionAnswering',
'ElectraForSequenceClassification',
'ElectraForTokenClassification',
'ElectraModel',
'ElectraPreTrainedModel',
'load_tf_weights_in_electra',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFElectraForMaskedLM',
'TFElectraForMultipleChoice',
'TFElectraForPreTraining',
'TFElectraForQuestionAnswering',
'TFElectraForSequenceClassification',
'TFElectraForTokenClassification',
'TFElectraModel',
'TFElectraPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'FlaxElectraForCausalLM',
'FlaxElectraForMaskedLM',
'FlaxElectraForMultipleChoice',
'FlaxElectraForPreTraining',
'FlaxElectraForQuestionAnswering',
'FlaxElectraForSequenceClassification',
'FlaxElectraForTokenClassification',
'FlaxElectraModel',
'FlaxElectraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
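# An illustrative sketch (hypothetical session) of what the lazy structure
# above buys: importing the package itself is cheap, and each submodule listed
# in _import_structure is only imported when one of its names is first accessed.
import transformers.models.electra as electra

sketch_config = electra.ElectraConfig(hidden_size=256)  # resolves configuration_electra lazily
sketch_model = electra.ElectraModel(sketch_config)      # resolves modeling_electra (requires torch)
print(type(sketch_model).__name__)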
| 11 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase__ = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
},
}
lowerCAmelCase__ = {
'facebook/mbart-large-en-ro': 10_24,
'facebook/mbart-large-cc25': 10_24,
}
# fmt: off
lowerCAmelCase__ = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
__SCREAMING_SNAKE_CASE = MBartTokenizer
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="<s>" , __lowerCamelCase="</s>" , __lowerCamelCase="</s>" , __lowerCamelCase="<s>" , __lowerCamelCase="<unk>" , __lowerCamelCase="<pad>" , __lowerCamelCase="<mask>" , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , **__lowerCamelCase , ) -> Optional[int]:
        # Mask token behaves like a normal word, i.e. includes the space before it
_A : List[str] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase) else mask_token
super().__init__(
vocab_file=__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , **__lowerCamelCase , )
_A : Union[str, Any] = vocab_file
_A : int = False if not self.vocab_file else True
_A : Optional[int] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens])
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
_A : Union[str, Any] = {
lang_code: self.convert_tokens_to_ids(__lowerCamelCase) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_A : Optional[int] = src_lang if src_lang is not None else "en_XX"
_A : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang)
_A : int = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
@property
def _lowerCamelCase ( self) -> str:
return self._src_lang
@src_lang.setter
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Dict = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]:
_A : List[str] = [self.sep_token_id]
_A : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) -> Dict:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
_A : str = src_lang
_A : Any = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)
_A : Tuple = self.convert_tokens_to_ids(__lowerCamelCase)
_A : Dict = tgt_lang_id
return inputs
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = "en_XX" , __lowerCamelCase = None , __lowerCamelCase = "ro_RO" , **__lowerCamelCase , ) -> BatchEncoding:
_A : Any = src_lang
_A : int = tgt_lang
return super().prepare_seqaseq_batch(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self) -> List[str]:
return self.set_src_lang_special_tokens(self.src_lang)
def _lowerCamelCase ( self) -> List[Any]:
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : int = self.convert_tokens_to_ids(__lowerCamelCase)
_A : int = []
_A : List[str] = [self.eos_token_id, self.cur_lang_code]
_A : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens)
_A : str = self.convert_ids_to_tokens(self.suffix_tokens)
_A : List[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Optional[int] = self.convert_tokens_to_ids(__lowerCamelCase)
_A : List[Any] = []
_A : str = [self.eos_token_id, self.cur_lang_code]
_A : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens)
_A : int = self.convert_ids_to_tokens(self.suffix_tokens)
_A : str = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer.")
if not os.path.isdir(__lowerCamelCase):
logger.error(F"Vocabulary path ({save_directory}) should be a directory.")
return
_A : int = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(__lowerCamelCase):
copyfile(self.vocab_file , __lowerCamelCase)
return (out_vocab_file,)
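# A hedged usage sketch for the tokenizer above: `src_lang` drives
# set_src_lang_special_tokens, so the source language code ends up after </s>
# in the suffix. The checkpoint name is one of the public ones in the maps above.
from transformers import MBartTokenizerFast

sketch_tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
sketch_batch = sketch_tok("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
print(sketch_tok.convert_ids_to_tokens(sketch_batch["input_ids"][0].tolist()[-2:]))  # ['</s>', 'en_XX']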
| 11 | 1 |
from __future__ import annotations
lowerCAmelCase__ = 10
def _UpperCAmelCase (UpperCamelCase__ : list[int] ):
_A : List[str] = 1
_A : List[Any] = max(UpperCamelCase__ )
while placement <= max_digit:
# declare and initialize empty buckets
_A : list[list] = [[] for _ in range(UpperCamelCase__ )]
# split list_of_ints between the buckets
for i in list_of_ints:
_A : List[Any] = int((i / placement) % RADIX )
buckets[tmp].append(UpperCamelCase__ )
        # put each bucket's contents back into list_of_ints
_A : Union[str, Any] = 0
for b in range(UpperCamelCase__ ):
for i in buckets[b]:
_A : List[str] = i
a += 1
        # move to the next digit place
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
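# A readable reference version of the function above (descriptive names
# restored; the algorithm is identical: bucket the values by each digit,
# least significant place first).
def radix_sort(list_of_ints: list[int]) -> list[int]:
    RADIX = 10
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list[int]] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets by the current digit
        for value in list_of_ints:
            buckets[int((value / placement) % RADIX)].append(value)
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX):
            for value in buckets[b]:
                list_of_ints[a] = value
                a += 1
        placement *= RADIX  # move to the next digit place
    return list_of_ints

print(radix_sort([170, 45, 75, 90, 802, 24, 2, 66]))  # [2, 24, 45, 66, 75, 90, 170, 802]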
| 11 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'vocab_file': 'vocab.json',
'tokenizer_config_file': 'tokenizer_config.json',
'merges_file': 'merges.txt',
}
lowerCAmelCase__ = {
'vocab_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
),
},
'tokenizer_config_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
),
},
'merges_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
),
},
}
lowerCAmelCase__ = '</w>'
lowerCAmelCase__ = '@@ '
def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] ):
_A : Optional[int] = set()
_A : Optional[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_A : List[Any] = char
return pairs
# Speech2Text2 has no max input length
lowerCAmelCase__ = {'facebook/s2t-wav2vec2-large-en-de': 10_24}
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
def __init__( self , __lowerCamelCase , __lowerCamelCase="<s>" , __lowerCamelCase="<pad>" , __lowerCamelCase="</s>" , __lowerCamelCase="<unk>" , __lowerCamelCase=False , __lowerCamelCase=None , **__lowerCamelCase , ) -> Optional[Any]:
super().__init__(
unk_token=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , pad_token=__lowerCamelCase , do_lower_case=__lowerCamelCase , **__lowerCamelCase , )
_A : Dict = do_lower_case
with open(__lowerCamelCase , encoding="utf-8") as vocab_handle:
_A : Optional[int] = json.load(__lowerCamelCase)
_A : Optional[Any] = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(F"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
_A : Optional[Any] = None
_A : Tuple = None
else:
with open(__lowerCamelCase , encoding="utf-8") as merges_handle:
_A : Optional[int] = merges_handle.read().split("\n")[:-1]
_A : Union[str, Any] = [tuple(merge.split()[:2]) for merge in merges]
_A : Optional[int] = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase))))
_A : List[Any] = {}
@property
def _lowerCamelCase ( self) -> int:
return len(self.decoder)
def _lowerCamelCase ( self) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder)
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
_A : Tuple = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
_A : int = get_pairs(__lowerCamelCase)
if not pairs:
return token
while True:
_A : Any = min(__lowerCamelCase , key=lambda __lowerCamelCase: self.bpe_ranks.get(__lowerCamelCase , float("inf")))
if bigram not in self.bpe_ranks:
break
_A , _A : Optional[int] = bigram
_A : int = []
_A : str = 0
while i < len(__lowerCamelCase):
try:
_A : str = word.index(__lowerCamelCase , __lowerCamelCase)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
_A : str = j
if word[i] == first and i < len(__lowerCamelCase) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
_A : List[str] = tuple(__lowerCamelCase)
_A : List[str] = new_word
if len(__lowerCamelCase) == 1:
break
else:
_A : List[Any] = get_pairs(__lowerCamelCase)
_A : Tuple = " ".join(__lowerCamelCase)
if word == "\n " + BPE_TOKEN_MERGES:
_A : List[str] = "\n" + BPE_TOKEN_MERGES
if word.endswith(__lowerCamelCase):
_A : int = word.replace(__lowerCamelCase , "")
_A : int = word.replace(" " , __lowerCamelCase)
_A : Union[str, Any] = word
return word
def _lowerCamelCase ( self , __lowerCamelCase) -> Optional[Any]:
if self.bpe_ranks is None:
raise ValueError(
"This tokenizer was instantiated without a `merges.txt` file, so"
" that it can only be used for decoding, not for encoding."
"Make sure to provide `merges.txt` file at instantiation to enable "
"encoding.")
if self.do_lower_case:
_A : List[Any] = text.lower()
_A : Optional[int] = text.split()
_A : List[str] = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(__lowerCamelCase).split(" ")))
return split_tokens
def _lowerCamelCase ( self , __lowerCamelCase) -> int:
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token))
def _lowerCamelCase ( self , __lowerCamelCase) -> str:
_A : List[str] = self.decoder.get(__lowerCamelCase , self.unk_token)
return result
def _lowerCamelCase ( self , __lowerCamelCase) -> str:
_A : str = " ".join(__lowerCamelCase)
# make sure @@ tokens are concatenated
_A : int = "".join(string.split(__lowerCamelCase))
return string
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
_A : Any = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
_A : Any = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
with open(__lowerCamelCase , "w" , encoding="utf-8") as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase) + "\n")
_A : Union[str, Any] = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(__lowerCamelCase , "w" , encoding="utf-8") as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase: kv[1]):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!")
_A : Optional[int] = token_index
writer.write(" ".join(__lowerCamelCase) + "\n")
index += 1
return (vocab_file, merges_file)
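# A toy walk-through (independent of the class above) of the merge loop that
# the bpe method implements: repeatedly merge the adjacent pair with the lowest
# rank in the merges table until no ranked pair is left. The merge table here
# is made up purely for illustration.
toy_ranks = {("l", "o"): 0, ("lo", "w"): 1, ("low", "</w>"): 2}
word = ("l", "o", "w", "</w>")
while True:
    pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
    ranked = [p for p in pairs if p in toy_ranks]
    if not ranked:
        break
    first, second = min(ranked, key=toy_ranks.get)
    new_word, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
            new_word.append(word[i] + word[i + 1])  # merge the pair
            i += 2
        else:
            new_word.append(word[i])
            i += 1
    word = tuple(new_word)
print(word)  # ('low</w>',)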
| 11 | 1 |
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and a second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# As the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a training run:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each variation multiple times, e.g., 3 times with --repeat-times 3, and the averaged results will be reported.
#
# By default it'll use the lowest result as the baseline (100%) and compare the rest to it,
# as can be seen in the table above, but you can also specify which combination should serve as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
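#
# A tiny sketch of how the variation matrix described above gets built (this
# mirrors the itertools.product call in main() further down; the two dimension
# strings are just the examples from this header):
#
#   import itertools, re
#   dims = [list(map(str.strip, re.split(r"\|", d))) for d in ["--tf32 0|--tf32 1", "|--fp16|--bf16"]]
#   variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
#   # -> ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16',
#   #     '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']
#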
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
lowerCAmelCase__ = float('nan')
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , __lowerCamelCase) -> Optional[Any]:
_A : List[Any] = sys.stdout
_A : str = open(__lowerCamelCase , "a")
def __getattr__( self , __lowerCamelCase) -> List[str]:
return getattr(self.stdout , __lowerCamelCase)
def _lowerCamelCase ( self , __lowerCamelCase) -> str:
self.stdout.write(__lowerCamelCase)
# strip tqdm codes
self.file.write(re.sub(r"^.*\r" , "" , __lowerCamelCase , 0 , re.M))
def _UpperCAmelCase (UpperCamelCase__ : str=80 , UpperCamelCase__ : Tuple=False ):
_A : Tuple = []
# deal with critical env vars
_A : Dict = ["CUDA_VISIBLE_DEVICES"]
for key in env_keys:
_A : Optional[int] = os.environ.get(UpperCamelCase__ , UpperCamelCase__ )
if val is not None:
cmd.append(f"{key}={val}" )
# python executable (not always needed if the script is executable)
_A : Optional[int] = sys.executable if full_python_path else sys.executable.split("/" )[-1]
cmd.append(UpperCamelCase__ )
# now the normal args
cmd += list(map(shlex.quote , sys.argv ) )
# split up into up to MAX_WIDTH lines with shell multi-line escapes
_A : Tuple = []
_A : Dict = ""
while len(UpperCamelCase__ ) > 0:
current_line += f"{cmd.pop(0 )} "
if len(UpperCamelCase__ ) == 0 or len(UpperCamelCase__ ) + len(cmd[0] ) + 1 > max_width - 1:
lines.append(UpperCamelCase__ )
_A : Union[str, Any] = ""
return "\\\n".join(UpperCamelCase__ )
def _UpperCAmelCase (UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple ):
# unwrap multi-line input
_A : Union[str, Any] = re.sub(r"[\\\n]+" , " " , args.base_cmd )
# remove --output_dir if any and set our own
_A : int = re.sub("--output_dir\s+[^\s]+" , "" , args.base_cmd )
args.base_cmd += f" --output_dir {output_dir}"
# ensure we have --overwrite_output_dir
_A : int = re.sub("--overwrite_output_dir\s+" , "" , args.base_cmd )
args.base_cmd += " --overwrite_output_dir"
return [sys.executable] + shlex.split(args.base_cmd )
def _UpperCAmelCase (UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] ):
    # Enable this to debug everything but the run itself, making it fast and easy to watch the progress.
# This is useful for debugging the output formatting quickly - we can remove it later once
# everybody is happy with the output
if 0:
import random
from time import sleep
sleep(0 )
return dict(
{k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 1_00.2, 55.66_66, 2_22.22_22_22_22] )} , )
_A : Dict = subprocess.run(UpperCamelCase__ , capture_output=UpperCamelCase__ , text=UpperCamelCase__ )
if verbose:
print("STDOUT" , result.stdout )
print("STDERR" , result.stderr )
# save the streams
_A : Tuple = variation.replace(" " , "-" )
with open(Path(UpperCamelCase__ ) / f"log.{prefix}.stdout.txt" , "w" ) as f:
f.write(result.stdout )
with open(Path(UpperCamelCase__ ) / f"log.{prefix}.stderr.txt" , "w" ) as f:
f.write(result.stderr )
if result.returncode != 0:
if verbose:
print("failed" )
return {target_metric_key: nan}
with io.open(f"{output_dir}/all_results.json" , "r" , encoding="utf-8" ) as f:
_A : List[str] = json.load(UpperCamelCase__ )
# filter out just the keys we want
return {k: v for k, v in metrics.items() if k in metric_keys}
def _UpperCAmelCase (UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Any , ):
_A : Union[str, Any] = []
_A : Optional[int] = []
_A : Any = f"{id}: {variation:<{longest_variation_len}}"
_A : Dict = f"{preamble}: "
_A : Union[str, Any] = set(report_metric_keys + [target_metric_key] )
for i in tqdm(range(UpperCamelCase__ ) , desc=UpperCamelCase__ , leave=UpperCamelCase__ ):
_A : Optional[Any] = process_run_single(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
_A : Optional[Any] = single_run_metrics[target_metric_key]
if not math.isnan(UpperCamelCase__ ):
metrics.append(UpperCamelCase__ )
results.append(UpperCamelCase__ )
outcome += "✓"
else:
outcome += "✘"
_A : str = f"\33[2K\r{outcome}"
if len(UpperCamelCase__ ) > 0:
_A : List[str] = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
_A : Any = round(mean_metrics[target_metric_key] , 2 )
_A : Tuple = f"{outcome} {mean_target}"
if len(UpperCamelCase__ ) > 1:
results_str += f" {tuple(round(UpperCamelCase__ , 2 ) for x in results )}"
print(UpperCamelCase__ )
_A : Optional[int] = variation
return mean_metrics
else:
print(UpperCamelCase__ )
return {variation_key: variation, target_metric_key: nan}
def _UpperCAmelCase ():
_A : int = torch.cuda.get_device_properties(torch.device("cuda" ) )
return f"\nDatetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n"
def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict ):
_A : Any = pd.DataFrame(UpperCamelCase__ )
_A : List[str] = "variation"
_A : List[Any] = "diff_%"
_A : int = nan
if base_variation is not None and len(df[df[variation_key] == base_variation] ):
# this may still return nan
_A : int = df.loc[df[variation_key] == base_variation][target_metric_key].item()
if math.isnan(UpperCamelCase__ ):
# as a fallback, use the minimal value as the sentinel
_A : List[str] = df.loc[df[target_metric_key] != nan][target_metric_key].min()
# create diff column if possible
if not math.isnan(UpperCamelCase__ ):
_A : Optional[Any] = df.apply(
lambda UpperCamelCase__ : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value )
if not math.isnan(r[target_metric_key] )
else 0 , axis="columns" , )
# re-order columns
_A : Union[str, Any] = [variation_key, target_metric_key, diff_key, *report_metric_keys]
_A : Any = df.reindex(UpperCamelCase__ , axis="columns" ) # reorder cols
# capitalize
_A : Tuple = df.rename(str.capitalize , axis="columns" )
# make the cols as narrow as possible
_A : List[str] = df.rename(lambda UpperCamelCase__ : c.replace("_" , "<br>" ) , axis="columns" )
_A : Union[str, Any] = df.rename(lambda UpperCamelCase__ : c.replace("_" , "\n" ) , axis="columns" )
_A : Optional[int] = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=UpperCamelCase__ , floatfmt=".2f" )]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=UpperCamelCase__ , floatfmt=".2f" )]
print("\n\n".join(UpperCamelCase__ ) )
def _UpperCAmelCase ():
_A : int = argparse.ArgumentParser()
parser.add_argument(
"--base-cmd" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="Base cmd" , )
parser.add_argument(
"--variations" , default=UpperCamelCase__ , type=UpperCamelCase__ , nargs="+" , required=UpperCamelCase__ , help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'" , )
parser.add_argument(
"--base-variation" , default=UpperCamelCase__ , type=UpperCamelCase__ , help="Baseline variation to compare to. if None the minimal target value will be used to compare against" , )
parser.add_argument(
"--target-metric-key" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second" , )
parser.add_argument(
"--report-metric-keys" , default="" , type=UpperCamelCase__ , help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples" , )
parser.add_argument(
"--repeat-times" , default=1 , type=UpperCamelCase__ , help="How many times to re-run each variation - an average will be reported" , )
parser.add_argument(
"--output_dir" , default="output_benchmark" , type=UpperCamelCase__ , help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked" , )
parser.add_argument(
"--verbose" , default=UpperCamelCase__ , action="store_true" , help="Whether to show the outputs of each run or just the benchmark progress" , )
_A : int = parser.parse_args()
_A : Union[str, Any] = args.output_dir
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
_A : Tuple = get_base_command(UpperCamelCase__ , UpperCamelCase__ )
# split each dimension into its --foo variations
_A : Dict = [list(map(str.strip , re.split(r"\|" , UpperCamelCase__ ) ) ) for x in args.variations]
# build a cartesian product of dimensions and convert those back into cmd-line arg strings,
# while stripping white space for inputs that were empty
_A : Union[str, Any] = list(map(str.strip , map(" ".join , itertools.product(*UpperCamelCase__ ) ) ) )
_A : Union[str, Any] = max(len(UpperCamelCase__ ) for x in variations )
# split wanted keys
_A : str = args.report_metric_keys.split()
# capture prints into a log file for convenience
_A : Optional[int] = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S' )}.txt"
print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt" )
print(f"and this script's output is also piped into {report_fn}" )
_A : Tuple = Tee(UpperCamelCase__ )
print(f"\n*** Running {len(UpperCamelCase__ )} benchmarks:" )
print(f"Base command: {' '.join(UpperCamelCase__ )}" )
_A : str = "variation"
_A : Union[str, Any] = []
for id, variation in enumerate(tqdm(UpperCamelCase__ , desc="Total completion: " , leave=UpperCamelCase__ ) ):
_A : Dict = base_cmd + variation.split()
results.append(
process_run(
id + 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , args.target_metric_key , UpperCamelCase__ , args.repeat_times , UpperCamelCase__ , args.verbose , ) )
process_results(UpperCamelCase__ , args.target_metric_key , UpperCamelCase__ , args.base_variation , UpperCamelCase__ )
if __name__ == "__main__":
main()
| 11 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = "vit_mae"
def __init__( self , __lowerCamelCase=7_6_8 , __lowerCamelCase=1_2 , __lowerCamelCase=1_2 , __lowerCamelCase=3_0_7_2 , __lowerCamelCase="gelu" , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.0_2 , __lowerCamelCase=1e-12 , __lowerCamelCase=2_2_4 , __lowerCamelCase=1_6 , __lowerCamelCase=3 , __lowerCamelCase=True , __lowerCamelCase=1_6 , __lowerCamelCase=5_1_2 , __lowerCamelCase=8 , __lowerCamelCase=2_0_4_8 , __lowerCamelCase=0.7_5 , __lowerCamelCase=False , **__lowerCamelCase , ) -> int:
super().__init__(**__lowerCamelCase)
_A : int = hidden_size
_A : List[str] = num_hidden_layers
_A : List[Any] = num_attention_heads
_A : Optional[Any] = intermediate_size
_A : Optional[int] = hidden_act
_A : List[Any] = hidden_dropout_prob
_A : List[Any] = attention_probs_dropout_prob
_A : Union[str, Any] = initializer_range
_A : str = layer_norm_eps
_A : Any = image_size
_A : int = patch_size
_A : int = num_channels
_A : Dict = qkv_bias
_A : Tuple = decoder_num_attention_heads
_A : Tuple = decoder_hidden_size
_A : List[str] = decoder_num_hidden_layers
_A : Optional[Any] = decoder_intermediate_size
_A : List[str] = mask_ratio
_A : Union[str, Any] = norm_pix_loss
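# A minimal hedged usage sketch for the config above (public transformers
# names; the kwargs shown are among those defined in __init__, everything else
# keeps its default):
from transformers import ViTMAEConfig, ViTMAEModel

sketch_config = ViTMAEConfig(image_size=224, patch_size=16, mask_ratio=0.75)
sketch_model = ViTMAEModel(sketch_config)  # randomly initialized encoder
print(sketch_config.decoder_hidden_size, sketch_config.norm_pix_loss)  # 512 False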
| 11 | 1 |
def _UpperCAmelCase (UpperCamelCase__ : list , UpperCamelCase__ : list , UpperCamelCase__ : int ):
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
raise ValueError("The length of profit and weight must be same." )
if max_weight <= 0:
raise ValueError("max_weight must greater than zero." )
if any(p < 0 for p in profit ):
raise ValueError("Profit can not be negative." )
if any(w < 0 for w in weight ):
raise ValueError("Weight can not be negative." )
    # Build a list of the profit gained per 1 kg of each item,
    # i.e. calculate and append profit/weight for each element.
_A : Tuple = [p / w for p, w in zip(UpperCamelCase__ , UpperCamelCase__ )]
# Creating a copy of the list and sorting profit/weight in ascending order
_A : Optional[int] = sorted(UpperCamelCase__ )
# declaring useful variables
_A : List[Any] = len(UpperCamelCase__ )
_A : Dict = 0
_A : List[Any] = 0
_A : List[Any] = 0
    # loop until the total weight reaches the max limit (e.g. 15 kg) and while i < length
while limit <= max_weight and i < length:
        # pick the largest remaining profit/weight ratio from sorted_profit_by_weight
_A : Union[str, Any] = sorted_profit_by_weight[length - i - 1]
_A : Optional[int] = profit_by_weight.index(UpperCamelCase__ )
_A : List[str] = -1
        # check if the item's full weight still fits within the remaining
        # capacity.
if max_weight - limit >= weight[index]:
limit += weight[index]
            # Take the whole item: the fraction taken is
            # weight[index]/weight[index] == 1
gain += 1 * profit[index]
else:
            # The item doesn't fit entirely, so take only the fraction that
            # fills the remaining capacity and the matching share of profit:
            # (max_weight - limit) / weight[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
if __name__ == "__main__":
print(
'Input profits, weights, and then max_weight (all positive ints) separated by '
'spaces.'
)
lowerCAmelCase__ = [int(x) for x in input('Input profits separated by spaces: ').split()]
lowerCAmelCase__ = [int(x) for x in input('Input weights separated by spaces: ').split()]
lowerCAmelCase__ = int(input('Max weight allowed: '))
# Function Call
calc_profit(profit, weight, max_weight)
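# A hand-checked example of the greedy fractional-knapsack logic above, as a
# clean self-contained reimplementation (the transformed names in the function
# above don't run as-is):
def fractional_knapsack(profit: list[float], weight: list[float], max_weight: float) -> float:
    # sort items by profit per unit weight, best ratio first
    items = sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True)
    gain, limit = 0.0, 0.0
    for p, w in items:
        if limit + w <= max_weight:
            limit += w
            gain += p  # take the whole item
        else:
            gain += (max_weight - limit) / w * p  # take the fitting fraction
            break
    return gain

# profits [10, 9, 8], weights [5, 7, 4], capacity 10: take the two ratio-2.0
# items whole (+18), then 1/7 of the last item (+9/7) -> about 19.2857
print(fractional_knapsack([10, 9, 8], [5, 7, 4], 10))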
| 11 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowerCAmelCase__ = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 11 | 1 |
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (EulerDiscreteScheduler,)
__SCREAMING_SNAKE_CASE = 10
def _lowerCamelCase ( self , **__lowerCamelCase) -> Union[str, Any]:
_A : Tuple = {
"num_train_timesteps": 1_1_0_0,
"beta_start": 0.0_0_0_1,
"beta_end": 0.0_2,
"beta_schedule": "linear",
}
config.update(**__lowerCamelCase)
return config
def _lowerCamelCase ( self) -> Dict:
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=__lowerCamelCase)
def _lowerCamelCase ( self) -> Optional[int]:
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2]):
self.check_over_configs(beta_start=__lowerCamelCase , beta_end=__lowerCamelCase)
def _lowerCamelCase ( self) -> Optional[int]:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__lowerCamelCase)
def _lowerCamelCase ( self) -> Optional[int]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__lowerCamelCase)
def _lowerCamelCase ( self) -> str:
_A : Tuple = self.scheduler_classes[0]
_A : Optional[int] = self.get_scheduler_config()
_A : Optional[int] = scheduler_class(**__lowerCamelCase)
scheduler.set_timesteps(self.num_inference_steps)
_A : List[str] = torch.manual_seed(0)
_A : Any = self.dummy_model()
_A : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
_A : Optional[int] = sample.to(__lowerCamelCase)
for i, t in enumerate(scheduler.timesteps):
_A : List[str] = scheduler.scale_model_input(__lowerCamelCase , __lowerCamelCase)
_A : Optional[int] = model(__lowerCamelCase , __lowerCamelCase)
_A : Optional[Any] = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , generator=__lowerCamelCase)
_A : List[str] = output.prev_sample
_A : Dict = torch.sum(torch.abs(__lowerCamelCase))
_A : Any = torch.mean(torch.abs(__lowerCamelCase))
assert abs(result_sum.item() - 1_0.0_8_0_7) < 1e-2
assert abs(result_mean.item() - 0.0_1_3_1) < 1e-3
def _lowerCamelCase ( self) -> Any:
_A : List[Any] = self.scheduler_classes[0]
_A : Any = self.get_scheduler_config(prediction_type="v_prediction")
_A : Optional[int] = scheduler_class(**__lowerCamelCase)
scheduler.set_timesteps(self.num_inference_steps)
_A : str = torch.manual_seed(0)
_A : Tuple = self.dummy_model()
_A : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
_A : int = sample.to(__lowerCamelCase)
for i, t in enumerate(scheduler.timesteps):
_A : int = scheduler.scale_model_input(__lowerCamelCase , __lowerCamelCase)
_A : Tuple = model(__lowerCamelCase , __lowerCamelCase)
_A : Any = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , generator=__lowerCamelCase)
_A : Tuple = output.prev_sample
_A : Optional[int] = torch.sum(torch.abs(__lowerCamelCase))
_A : List[Any] = torch.mean(torch.abs(__lowerCamelCase))
assert abs(result_sum.item() - 0.0_0_0_2) < 1e-2
assert abs(result_mean.item() - 2.26_76e-06) < 1e-3
def _lowerCamelCase ( self) -> str:
_A : List[Any] = self.scheduler_classes[0]
_A : List[str] = self.get_scheduler_config()
_A : Dict = scheduler_class(**__lowerCamelCase)
scheduler.set_timesteps(self.num_inference_steps , device=__lowerCamelCase)
_A : List[Any] = torch.manual_seed(0)
_A : Any = self.dummy_model()
_A : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_A : Tuple = sample.to(__lowerCamelCase)
for t in scheduler.timesteps:
_A : List[str] = scheduler.scale_model_input(__lowerCamelCase , __lowerCamelCase)
_A : List[str] = model(__lowerCamelCase , __lowerCamelCase)
_A : Any = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , generator=__lowerCamelCase)
_A : int = output.prev_sample
_A : str = torch.sum(torch.abs(__lowerCamelCase))
_A : List[str] = torch.mean(torch.abs(__lowerCamelCase))
assert abs(result_sum.item() - 1_0.0_8_0_7) < 1e-2
assert abs(result_mean.item() - 0.0_1_3_1) < 1e-3
def _lowerCamelCase ( self) -> Tuple:
_A : Dict = self.scheduler_classes[0]
_A : Optional[int] = self.get_scheduler_config()
_A : Tuple = scheduler_class(**__lowerCamelCase , use_karras_sigmas=__lowerCamelCase)
scheduler.set_timesteps(self.num_inference_steps , device=__lowerCamelCase)
_A : List[Any] = torch.manual_seed(0)
_A : List[Any] = self.dummy_model()
_A : int = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_A : Union[str, Any] = sample.to(__lowerCamelCase)
for t in scheduler.timesteps:
_A : Union[str, Any] = scheduler.scale_model_input(__lowerCamelCase , __lowerCamelCase)
_A : Any = model(__lowerCamelCase , __lowerCamelCase)
_A : Tuple = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , generator=__lowerCamelCase)
_A : str = output.prev_sample
_A : Optional[Any] = torch.sum(torch.abs(__lowerCamelCase))
_A : str = torch.mean(torch.abs(__lowerCamelCase))
assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9) < 1e-2
assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3) < 1e-3
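# A hedged sketch of the sampling loop the tests above exercise, using the
# public diffusers API (config values match get_scheduler_config above; the
# "model" here is a stand-in identity function, not a trained network):
import torch
from diffusers import EulerDiscreteScheduler

sketch_scheduler = EulerDiscreteScheduler(num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02, beta_schedule="linear")
sketch_scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8) * sketch_scheduler.init_noise_sigma
generator = torch.manual_seed(0)
for t in sketch_scheduler.timesteps:
    model_input = sketch_scheduler.scale_model_input(sample, t)
    noise_pred = model_input  # stand-in for model(model_input, t).sample
    sample = sketch_scheduler.step(noise_pred, t, sample, generator=generator).prev_sample
print(sample.shape)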
| 11 |
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and a second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# As the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a training run:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each variation multiple times, e.g., 3 times with --repeat-times 3, and the averaged results will be reported.
#
# By default it'll use the lowest result as the baseline (100%) and compare the rest to it,
# as can be seen in the table above, but you can also specify which combination should serve as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
lowerCAmelCase__ = float('nan')
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , __lowerCamelCase) -> Optional[Any]:
_A : List[Any] = sys.stdout
_A : str = open(__lowerCamelCase , "a")
def __getattr__( self , __lowerCamelCase) -> List[str]:
return getattr(self.stdout , __lowerCamelCase)
def _lowerCamelCase ( self , __lowerCamelCase) -> str:
self.stdout.write(__lowerCamelCase)
# strip tqdm codes
self.file.write(re.sub(r"^.*\r" , "" , __lowerCamelCase , 0 , re.M))
def _UpperCAmelCase (UpperCamelCase__ : str=80 , UpperCamelCase__ : Tuple=False ):
_A : Tuple = []
# deal with critical env vars
_A : Dict = ["CUDA_VISIBLE_DEVICES"]
for key in env_keys:
_A : Optional[int] = os.environ.get(UpperCamelCase__ , UpperCamelCase__ )
if val is not None:
cmd.append(f"{key}={val}" )
# python executable (not always needed if the script is executable)
_A : Optional[int] = sys.executable if full_python_path else sys.executable.split("/" )[-1]
cmd.append(UpperCamelCase__ )
# now the normal args
cmd += list(map(shlex.quote , sys.argv ) )
# split up into up to MAX_WIDTH lines with shell multi-line escapes
_A : Tuple = []
_A : Dict = ""
while len(UpperCamelCase__ ) > 0:
current_line += f"{cmd.pop(0 )} "
if len(UpperCamelCase__ ) == 0 or len(UpperCamelCase__ ) + len(cmd[0] ) + 1 > max_width - 1:
lines.append(UpperCamelCase__ )
_A : Union[str, Any] = ""
return "\\\n".join(UpperCamelCase__ )
def _UpperCAmelCase (UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple ):
# unwrap multi-line input
_A : Union[str, Any] = re.sub(r"[\\\n]+" , " " , args.base_cmd )
# remove --output_dir if any and set our own
_A : int = re.sub("--output_dir\s+[^\s]+" , "" , args.base_cmd )
args.base_cmd += f" --output_dir {output_dir}"
# ensure we have --overwrite_output_dir
_A : int = re.sub("--overwrite_output_dir\s+" , "" , args.base_cmd )
args.base_cmd += " --overwrite_output_dir"
return [sys.executable] + shlex.split(args.base_cmd )
def _UpperCAmelCase (UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] ):
    # Enable this to debug everything but the run itself, making it fast and easy to watch the progress.
# This is useful for debugging the output formatting quickly - we can remove it later once
# everybody is happy with the output
if 0:
import random
from time import sleep
sleep(0 )
return dict(
{k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 1_00.2, 55.66_66, 2_22.22_22_22_22] )} , )
_A : Dict = subprocess.run(UpperCamelCase__ , capture_output=UpperCamelCase__ , text=UpperCamelCase__ )
if verbose:
print("STDOUT" , result.stdout )
print("STDERR" , result.stderr )
# save the streams
_A : Tuple = variation.replace(" " , "-" )
with open(Path(UpperCamelCase__ ) / f"log.{prefix}.stdout.txt" , "w" ) as f:
f.write(result.stdout )
with open(Path(UpperCamelCase__ ) / f"log.{prefix}.stderr.txt" , "w" ) as f:
f.write(result.stderr )
if result.returncode != 0:
if verbose:
print("failed" )
return {target_metric_key: nan}
with io.open(f"{output_dir}/all_results.json" , "r" , encoding="utf-8" ) as f:
_A : List[str] = json.load(UpperCamelCase__ )
# filter out just the keys we want
return {k: v for k, v in metrics.items() if k in metric_keys}
def _UpperCAmelCase (UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Any , ):
_A : Union[str, Any] = []
_A : Optional[int] = []
_A : Any = f"{id}: {variation:<{longest_variation_len}}"
_A : Dict = f"{preamble}: "
_A : Union[str, Any] = set(report_metric_keys + [target_metric_key] )
for i in tqdm(range(UpperCamelCase__ ) , desc=UpperCamelCase__ , leave=UpperCamelCase__ ):
_A : Optional[Any] = process_run_single(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
_A : Optional[Any] = single_run_metrics[target_metric_key]
if not math.isnan(UpperCamelCase__ ):
metrics.append(UpperCamelCase__ )
results.append(UpperCamelCase__ )
outcome += "✓"
else:
outcome += "✘"
_A : str = f"\33[2K\r{outcome}"
if len(UpperCamelCase__ ) > 0:
_A : List[str] = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
_A : Any = round(mean_metrics[target_metric_key] , 2 )
_A : Tuple = f"{outcome} {mean_target}"
if len(UpperCamelCase__ ) > 1:
results_str += f" {tuple(round(UpperCamelCase__ , 2 ) for x in results )}"
print(UpperCamelCase__ )
_A : Optional[int] = variation
return mean_metrics
else:
print(UpperCamelCase__ )
return {variation_key: variation, target_metric_key: nan}
def _UpperCAmelCase ():
_A : int = torch.cuda.get_device_properties(torch.device("cuda" ) )
return f"\nDatetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n"
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-cmd", default=None, type=str, required=True, help="Base cmd")
    parser.add_argument("--variations", default=None, type=str, nargs="+", required=True, help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'")
    parser.add_argument("--base-variation", default=None, type=str, help="Baseline variation to compare to. if None the minimal target value will be used to compare against")
    parser.add_argument("--target-metric-key", default=None, type=str, required=True, help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second")
    parser.add_argument("--report-metric-keys", default="", type=str, help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'")
    parser.add_argument("--repeat-times", default=1, type=int, help="How many times to re-run each variation - an average will be reported")
    parser.add_argument("--output_dir", default="output_benchmark", type=str, help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked")
    parser.add_argument("--verbose", default=False, action="store_true", help="Whether to show the outputs of each run or just the benchmark progress")
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(id + 1, cmd, variation_key, variation, longest_variation_len, args.target_metric_key, report_metric_keys, args.repeat_times, output_dir, args.verbose)
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)


if __name__ == "__main__":
    main()
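

# Illustrative sketch (not part of the original script): how '|'-separated
# --variations dimensions expand into concrete command-line suffixes. The dims
# below are hypothetical example values.
def _demo_variation_expansion():
    variation_dims = [["", "--fp16", "--bf16"], ["", "--tf32"]]
    expanded = list(map(str.strip, map(" ".join, itertools.product(*variation_dims))))
    # -> ['', '--tf32', '--fp16', '--fp16 --tf32', '--bf16', '--bf16 --tf32']
    return expanded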
| 11 | 1 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths,
        split=None,
        features=None,
        cache_dir=None,
        keep_in_memory=False,
        streaming=False,
        field=None,
        num_proc=None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, field=field, **kwargs
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class JsonDatasetWriter:
    def __init__(
        self,
        dataset,
        path_or_buf,
        batch_size=None,
        num_proc=None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self):
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs)
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(
        self,
        file_obj,
        orient,
        lines,
        index,
        **to_json_kwargs,
    ):
        """Writes the dataset as JSON to a binary file handle; returns the number of bytes written."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
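
# Minimal usage sketch (illustrative, not part of the module): the public API that
# drives the writer above. Note that pandas' to_json(orient="records", lines=True)
# emits one JSON object per row with no trailing newline, which is why _batch_json
# appends "\n" itself. The file name below is arbitrary.
def _demo_to_json(path="sample.jsonl"):
    from datasets import Dataset

    ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
    ds.to_json(path, orient="records", lines=True)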
| 11 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
lowerCAmelCase__ = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase__ :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "Pretrained config name or path if not the same as model_name"})
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
__SCREAMING_SNAKE_CASE = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class lowerCAmelCase__ :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(default=a , metadata={"help": "The input training data file (a text file)."})
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "Overwrite the cached training and evaluation sets"})
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "The number of processes to use for the preprocessing."} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def _lowerCamelCase ( self) -> int:
if self.train_file is not None:
_A : Optional[int] = self.train_file.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
_A : Dict = self.validation_file.split(".")[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """Data collator that dynamically pads a batch of multiple-choice features."""

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
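

# Shape sketch (illustrative, not part of the example): a multiple-choice batch is
# flattened from (batch, num_choices, seq_len) to (batch * num_choices, seq_len)
# for padding, then viewed back. The sizes below are arbitrary.
def _demo_unflatten():
    batch_size, num_choices, seq_len = 2, 4, 8
    flat = torch.zeros(batch_size * num_choices, seq_len)
    unflat = flat.view(batch_size, num_choices, -1)
    assert unflat.shape == (batch_size, num_choices, seq_len)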
def _UpperCAmelCase ():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_A : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_A , _A , _A : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_A , _A , _A : Union[str, Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_swag" , UpperCamelCase__ , UpperCamelCase__ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
_A : List[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_A : Optional[int] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
_A : List[str] = {}
if data_args.train_file is not None:
_A : Optional[int] = data_args.train_file
if data_args.validation_file is not None:
_A : Tuple = data_args.validation_file
_A : Union[str, Any] = data_args.train_file.split("." )[-1]
_A : List[str] = load_dataset(
UpperCamelCase__ , data_files=UpperCamelCase__ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
_A : Union[str, Any] = load_dataset(
"swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_A : Optional[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_A : Optional[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_A : List[Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
_A : str = [f"ending{i}" for i in range(4 )]
_A : Union[str, Any] = "sent1"
_A : str = "sent2"
if data_args.max_seq_length is None:
_A : Any = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`." )
_A : Optional[Any] = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
_A : int = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
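
    # Worked example of the un-flatten step above (illustrative): with 8 tokenized
    # rows and chunks of 4, v[i : i + 4] for i in (0, 4) regroups them into
    # 2 examples of 4 choices each, restoring the per-example nesting.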
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
_A : Optional[int] = raw_datasets["train"]
if data_args.max_train_samples is not None:
_A : Union[str, Any] = min(len(UpperCamelCase__ ) , data_args.max_train_samples )
_A : Any = train_dataset.select(range(UpperCamelCase__ ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
_A : Optional[int] = train_dataset.map(
UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
_A : Optional[int] = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
_A : str = min(len(UpperCamelCase__ ) , data_args.max_eval_samples )
_A : Dict = eval_dataset.select(range(UpperCamelCase__ ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
_A : List[str] = eval_dataset.map(
UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
_A : str = (
default_data_collator
if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer , pad_to_multiple_of=8 if training_args.fp16 else None )
)
# Metric
    def compute_metrics(eval_predictions):
        predictions , label_ids = eval_predictions
        preds = np.argmax(predictions , axis=1 )
        return {"accuracy": (preds == label_ids).astype(np.float32 ).mean().item()}
# Initialize our Trainer
_A : List[str] = Trainer(
model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=UpperCamelCase__ , data_collator=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , )
# Training
if training_args.do_train:
_A : Any = None
if training_args.resume_from_checkpoint is not None:
_A : Optional[int] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_A : int = last_checkpoint
_A : Any = trainer.train(resume_from_checkpoint=UpperCamelCase__ )
trainer.save_model() # Saves the tokenizer too for easy upload
_A : Optional[int] = train_result.metrics
_A : Tuple = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCamelCase__ )
)
_A : Tuple = min(UpperCamelCase__ , len(UpperCamelCase__ ) )
trainer.log_metrics("train" , UpperCamelCase__ )
trainer.save_metrics("train" , UpperCamelCase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_A : List[Any] = trainer.evaluate()
_A : int = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(UpperCamelCase__ )
_A : Optional[Any] = min(UpperCamelCase__ , len(UpperCamelCase__ ) )
trainer.log_metrics("eval" , UpperCamelCase__ )
trainer.save_metrics("eval" , UpperCamelCase__ )
_A : Tuple = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "multiple-choice",
"dataset_tags": "swag",
"dataset_args": "regular",
"dataset": "SWAG",
"language": "en",
}
if training_args.push_to_hub:
trainer.push_to_hub(**UpperCamelCase__ )
else:
trainer.create_model_card(**UpperCamelCase__ )
def _UpperCAmelCase (UpperCamelCase__ : Optional[int] ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 11 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowerCAmelCase__ = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
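
# Illustrative sketch (not part of this module): the same deferred-import effect can
# be hand-rolled with PEP 562's module-level __getattr__; _LazyModule generalizes the
# pattern for the import structure declared above.
#
#     import importlib
#
#     def __getattr__(name):
#         if name == "SpeechEncoderDecoderConfig":
#             module = importlib.import_module(".configuration_speech_encoder_decoder", __name__)
#             return module.SpeechEncoderDecoderConfig
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")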
| 11 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False")) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "pytorch",
"script": "run_ddp.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "tensorflow",
"script": "run_tf_dist.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
},
])
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
def _lowerCamelCase ( self) -> str:
if self.framework == "pytorch":
subprocess.run(
F"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding="utf-8" , check=__lowerCamelCase , )
assert hasattr(self , "env")
def _lowerCamelCase ( self , __lowerCamelCase) -> Tuple:
_A : Dict = F"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
# distributed data settings
_A : Optional[Any] = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=__lowerCamelCase , instance_count=__lowerCamelCase , instance_type=self.instance_type , debugger_hook_config=__lowerCamelCase , hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=__lowerCamelCase , py_version="py36" , )
def _lowerCamelCase ( self , __lowerCamelCase) -> Optional[Any]:
TrainingJobAnalytics(__lowerCamelCase).export_csv(F"{self.env.test_path}/{job_name}_metrics.csv")
@parameterized.expand([(2,)])
def _lowerCamelCase ( self , __lowerCamelCase) -> Any:
# create estimator
_A : Union[str, Any] = self.create_estimator(__lowerCamelCase)
# run training
estimator.fit()
# result dataframe
_A : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
# extract kpis
_A : List[Any] = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
_A : Dict = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_A : Optional[Any] = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds" , 999999)
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
assert all(t <= self.results["eval_loss"] for t in eval_loss)
# dump tests result into json file to share in PR
with open(F"{estimator.latest_training_job.name}.json" , "w") as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , __lowerCamelCase)
| 11 | 1 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
def _lowerCamelCase ( self) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _lowerCamelCase ( self) -> str:
_A : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png")
_A : Optional[int] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
_A : Union[str, Any] = "xvjiarui/stable-diffusion-2-inpainting"
_A , _A : str = FlaxStableDiffusionInpaintPipeline.from_pretrained(__lowerCamelCase , safety_checker=__lowerCamelCase)
_A : str = "Face of a yellow cat, high resolution, sitting on a park bench"
_A : int = jax.random.PRNGKey(0)
        _A : Tuple = 50
_A : List[Any] = jax.device_count()
_A : List[str] = num_samples * [prompt]
_A : Tuple = num_samples * [init_image]
_A : Dict = num_samples * [mask_image]
_A , _A , _A : Any = pipeline.prepare_inputs(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
# shard inputs and rng
_A : Tuple = replicate(__lowerCamelCase)
_A : Tuple = jax.random.split(__lowerCamelCase , jax.device_count())
_A : Dict = shard(__lowerCamelCase)
_A : str = shard(__lowerCamelCase)
_A : List[str] = shard(__lowerCamelCase)
_A : Union[str, Any] = pipeline(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , jit=__lowerCamelCase)
        _A : Any = output.images.reshape(__lowerCamelCase , 512 , 512 , 3)
        _A : Optional[Any] = images[0, 253:256, 253:256, -1]
        _A : List[Any] = jnp.asarray(jax.device_get(image_slice.flatten()))
        _A : int = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084])
print(F"output_slice: {output_slice}")
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
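
# Shape sketch (illustrative, not part of the test): `shard` splits the leading batch
# axis across the local devices, so a (device_count * b, ...) array becomes
# (device_count, b, ...), matching what the jit-compiled, pmapped pipeline expects.
def _demo_shard_shapes():
    n = jax.device_count()
    x = jnp.zeros((n * 2, 4))
    assert shard(x).shape == (n, 2, 4)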
| 11 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ["image_processor", "tokenizer"]
__SCREAMING_SNAKE_CASE = "OwlViTImageProcessor"
__SCREAMING_SNAKE_CASE = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , **__lowerCamelCase) -> Union[str, Any]:
_A : int = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __lowerCamelCase , )
_A : List[Any] = kwargs.pop("feature_extractor")
_A : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`.")
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`.")
super().__init__(__lowerCamelCase , __lowerCamelCase)
def __call__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="max_length" , __lowerCamelCase="np" , **__lowerCamelCase) -> Any:
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none.")
if text is not None:
if isinstance(__lowerCamelCase , __lowerCamelCase) or (isinstance(__lowerCamelCase , __lowerCamelCase) and not isinstance(text[0] , __lowerCamelCase)):
_A : Union[str, Any] = [self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)]
elif isinstance(__lowerCamelCase , __lowerCamelCase) and isinstance(text[0] , __lowerCamelCase):
_A : Optional[Any] = []
# Maximum number of queries across batch
_A : str = max([len(__lowerCamelCase) for t in text])
# Pad all batch samples to max number of text queries
for t in text:
if len(__lowerCamelCase) != max_num_queries:
_A : Optional[int] = t + [" "] * (max_num_queries - len(__lowerCamelCase))
_A : List[Any] = self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)
encodings.append(__lowerCamelCase)
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings")
if return_tensors == "np":
_A : Tuple = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0)
_A : Optional[Any] = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0)
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
_A : Optional[int] = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0)
_A : Optional[int] = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0)
elif return_tensors == "pt" and is_torch_available():
import torch
_A : Optional[Any] = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0)
_A : Union[str, Any] = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0)
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
_A : Any = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0)
_A : Tuple = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0)
else:
raise ValueError("Target return tensor type could not be returned")
_A : Optional[Any] = BatchEncoding()
_A : Tuple = input_ids
_A : Dict = attention_mask
if query_images is not None:
_A : Optional[Any] = BatchEncoding()
_A : List[str] = self.image_processor(
__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase).pixel_values
_A : Union[str, Any] = query_pixel_values
if images is not None:
_A : int = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)
if text is not None and images is not None:
_A : Tuple = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
_A : int = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__lowerCamelCase) , tensor_type=__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> str:
return self.image_processor.post_process(*__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> List[str]:
return self.image_processor.post_process_object_detection(*__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> Optional[int]:
return self.image_processor.post_process_image_guided_detection(*__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> int:
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> Optional[int]:
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase)
@property
def _lowerCamelCase ( self) -> int:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __lowerCamelCase , )
return self.image_processor_class
@property
def _lowerCamelCase ( self) -> List[str]:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __lowerCamelCase , )
return self.image_processor
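
# Illustrative mirror of the per-image query padding implemented in __call__ above:
# shorter query lists are padded with " " so every image in the batch carries the
# same number of text queries. Pure Python, no tokenizer required.
def _demo_pad_queries():
    text = [["cat", "dog"], ["remote"]]
    max_num_queries = max(len(t) for t in text)
    padded = [t + [" "] * (max_num_queries - len(t)) for t in text]
    assert padded == [["cat", "dog"], ["remote", " "]]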
| 11 | 1 |
from string import ascii_uppercase
dicta = {char: i for i, char in enumerate(ascii_uppercase)}
dictb = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    cipher = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher += " "
        else:
            x = (dicta[letter] - dicta[key_new[i]]) % 26
            i += 1
            cipher += dictb[x]
    return cipher


def original_text(cipher: str, key_new: str) -> str:
    or_txt = ""
    i = 0
    for letter in cipher:
        if letter == " ":
            or_txt += " "
        else:
            x = (dicta[letter] + dicta[key_new[i]] + 26) % 26
            i += 1
            or_txt += dictb[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 11 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"]):
_A : Optional[int] = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(__lowerCamelCase)
def _lowerCamelCase ( self) -> int:
_A : Optional[int] = "sshleifer/tiny-gpt2"
_A : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : List[str] = PyTorchBenchmark(__lowerCamelCase)
_A : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> Dict:
_A : int = "sgugger/tiny-distilbert-classification"
_A : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , only_pretrain_model=__lowerCamelCase , )
_A : Dict = PyTorchBenchmark(__lowerCamelCase)
_A : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> Optional[Any]:
_A : Tuple = "sshleifer/tiny-gpt2"
_A : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , torchscript=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Union[str, Any] = PyTorchBenchmark(__lowerCamelCase)
_A : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision")
def _lowerCamelCase ( self) -> int:
_A : Any = "sshleifer/tiny-gpt2"
_A : Tuple = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , fp16=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Any = PyTorchBenchmark(__lowerCamelCase)
_A : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> Any:
_A : Union[str, Any] = "sshleifer/tiny-gpt2"
_A : Any = AutoConfig.from_pretrained(__lowerCamelCase)
# set architectures equal to `None`
_A : Dict = None
_A : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Union[str, Any] = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> int:
_A : List[Any] = "sshleifer/tiny-gpt2"
_A : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Optional[Any] = PyTorchBenchmark(__lowerCamelCase)
_A : int = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
@unittest.skipIf(torch_device == "cpu" , "Can't do half precision")
def _lowerCamelCase ( self) -> Optional[Any]:
_A : Any = "sshleifer/tiny-gpt2"
_A : Tuple = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , fp16=__lowerCamelCase , multi_process=__lowerCamelCase , )
_A : List[Any] = PyTorchBenchmark(__lowerCamelCase)
_A : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def _lowerCamelCase ( self) -> str:
_A : List[str] = "sshleifer/tiny-gpt2"
_A : Union[str, Any] = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Optional[Any] = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> int:
_A : Tuple = "sshleifer/tinier_bart"
_A : Optional[Any] = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Dict = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> str:
_A : List[Any] = "sshleifer/tiny-gpt2"
_A : Optional[Any] = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : List[str] = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def _lowerCamelCase ( self) -> int:
_A : int = "sshleifer/tinier_bart"
_A : str = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Tuple = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def _lowerCamelCase ( self) -> Dict:
_A : List[str] = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
_A : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , save_to_csv=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__lowerCamelCase , "inf_time.csv") , train_memory_csv_file=os.path.join(__lowerCamelCase , "train_mem.csv") , inference_memory_csv_file=os.path.join(__lowerCamelCase , "inf_mem.csv") , train_time_csv_file=os.path.join(__lowerCamelCase , "train_time.csv") , env_info_csv_file=os.path.join(__lowerCamelCase , "env.csv") , multi_process=__lowerCamelCase , )
_A : Tuple = PyTorchBenchmark(__lowerCamelCase)
benchmark.run()
self.assertTrue(Path(os.path.join(__lowerCamelCase , "inf_time.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "train_time.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "inf_mem.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "train_mem.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "env.csv")).exists())
def _lowerCamelCase ( self) -> int:
_A : Dict = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(__lowerCamelCase):
self.assertTrue(hasattr(__lowerCamelCase , "sequential"))
self.assertTrue(hasattr(__lowerCamelCase , "cumulative"))
self.assertTrue(hasattr(__lowerCamelCase , "current"))
self.assertTrue(hasattr(__lowerCamelCase , "total"))
with tempfile.TemporaryDirectory() as tmp_dir:
_A : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__lowerCamelCase , "log.txt") , log_print=__lowerCamelCase , trace_memory_line_by_line=__lowerCamelCase , multi_process=__lowerCamelCase , )
_A : Optional[int] = PyTorchBenchmark(__lowerCamelCase)
_A : Dict = benchmark.run()
_check_summary_is_not_empty(result.inference_summary)
_check_summary_is_not_empty(result.train_summary)
self.assertTrue(Path(os.path.join(__lowerCamelCase , "log.txt")).exists())
| 11 | 1 |
def solution(n: int = 600851475143) -> int:
    """Returns the largest prime factor of n."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    prime = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            prime = i
            n //= i
        i += 1
    if n > 1:
        prime = n
    return int(prime)
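

# Worked examples (illustrative): 13195 = 5 * 7 * 13 * 29, so solution(13195) == 29;
# with the Project Euler default, solution() == 6857, since
# 600851475143 = 71 * 839 * 1471 * 6857.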
if __name__ == "__main__":
print(f"{solution() = }")
| 11 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase__ = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
),
},
}
lowerCAmelCase__ = {
'facebook/nllb-large-en-ro': 10_24,
'facebook/nllb-200-distilled-600M': 10_24,
}
# fmt: off
lowerCAmelCase__ = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
__SCREAMING_SNAKE_CASE = NllbTokenizer
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="<s>" , __lowerCamelCase="</s>" , __lowerCamelCase="</s>" , __lowerCamelCase="<s>" , __lowerCamelCase="<unk>" , __lowerCamelCase="<pad>" , __lowerCamelCase="<mask>" , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=False , **__lowerCamelCase , ) -> Tuple:
# Mask token behave like a normal word, i.e. include the space before it
_A : Any = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase) else mask_token
_A : Optional[int] = legacy_behaviour
super().__init__(
vocab_file=__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , legacy_behaviour=__lowerCamelCase , **__lowerCamelCase , )
_A : int = vocab_file
_A : Optional[Any] = False if not self.vocab_file else True
_A : Tuple = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens])
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
_A : Union[str, Any] = {
lang_code: self.convert_tokens_to_ids(__lowerCamelCase) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_A : Optional[int] = src_lang if src_lang is not None else "eng_Latn"
_A : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang)
_A : List[str] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
@property
def _lowerCamelCase ( self) -> str:
return self._src_lang
@src_lang.setter
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]:
_A : Tuple = [self.sep_token_id]
_A : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) -> Optional[int]:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
_A : List[Any] = src_lang
_A : Optional[int] = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)
_A : Tuple = self.convert_tokens_to_ids(__lowerCamelCase)
_A : Tuple = tgt_lang_id
return inputs
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = "eng_Latn" , __lowerCamelCase = None , __lowerCamelCase = "fra_Latn" , **__lowerCamelCase , ) -> BatchEncoding:
_A : Tuple = src_lang
_A : int = tgt_lang
return super().prepare_seqaseq_batch(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self) -> str:
return self.set_src_lang_special_tokens(self.src_lang)
def _lowerCamelCase ( self) -> List[str]:
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Dict = self.convert_tokens_to_ids(__lowerCamelCase)
if self.legacy_behaviour:
_A : List[str] = []
_A : Dict = [self.eos_token_id, self.cur_lang_code]
else:
_A : Tuple = [self.cur_lang_code]
_A : Optional[Any] = [self.eos_token_id]
_A : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens)
_A : int = self.convert_ids_to_tokens(self.suffix_tokens)
_A : List[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
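        # Added note: with legacy_behaviour=False the template above becomes
        # e.g. single = ["eng_Latn", "$A", "</s>"]; in legacy mode the language
        # code trails instead: single = ["$A", "</s>", "eng_Latn"].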
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Optional[Any] = self.convert_tokens_to_ids(__lowerCamelCase)
if self.legacy_behaviour:
_A : Tuple = []
_A : Any = [self.eos_token_id, self.cur_lang_code]
else:
_A : Union[str, Any] = [self.cur_lang_code]
_A : str = [self.eos_token_id]
_A : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens)
_A : Dict = self.convert_ids_to_tokens(self.suffix_tokens)
_A : Union[str, Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer.")
if not os.path.isdir(__lowerCamelCase):
logger.error(F"Vocabulary path ({save_directory}) should be a directory.")
return
_A : Dict = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(__lowerCamelCase):
copyfile(self.vocab_file , __lowerCamelCase)
return (out_vocab_file,)
| 11 | 1 |
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 11 |
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {}
lowerCAmelCase__ = {}
lowerCAmelCase__ = {}
def _UpperCAmelCase (UpperCamelCase__ : type , UpperCamelCase__ : Optional[str] , UpperCamelCase__ : Optional[List[str]] = None , ):
_A : Union[str, Any] = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})" )
_A : Dict = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})" )
_A : Dict = format_type
def _UpperCAmelCase (UpperCamelCase__ : Exception , UpperCamelCase__ : Optional[str] , UpperCamelCase__ : Optional[List[str]] = None ):
_A : Union[str, Any] = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
_A : Union[str, Any] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
lowerCAmelCase__ = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
_register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
    lowerCAmelCase__ = ValueError('TensorFlow needs to be installed to be able to return TensorFlow tensors.')
_register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
lowerCAmelCase__ = ValueError('JAX needs to be installed to be able to return JAX arrays.')
_register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def _UpperCAmelCase (UpperCamelCase__ : Optional[str] ):
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def _UpperCAmelCase (UpperCamelCase__ : Optional[str] , **UpperCamelCase__ : List[Any] ):
_A : List[str] = get_format_type_from_alias(UpperCamelCase__ )
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**UpperCamelCase__ )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}'" )
| 11 | 1 |
from __future__ import annotations
from PIL import Image
# Define glider example
lowerCAmelCase__ = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
lowerCAmelCase__ = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def _UpperCAmelCase (UpperCamelCase__ : list[list[int]] ):
_A : Tuple = []
for i in range(len(UpperCamelCase__ ) ):
_A : Tuple = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
_A : Tuple = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(UpperCamelCase__ ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(UpperCamelCase__ ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(UpperCamelCase__ ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
_A : List[Any] = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or (not alive and neighbour_count == 3)
            ):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(UpperCamelCase__ )
return next_generation
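# Added sanity check, using the BLINKER grid defined above: one generation flips
# the vertical bar to a horizontal one,
# [[0, 1, 0], [0, 1, 0], [0, 1, 0]] -> [[0, 0, 0], [1, 1, 1], [0, 0, 0]].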
def _UpperCAmelCase (UpperCamelCase__ : list[list[int]] , UpperCamelCase__ : int ):
_A : Union[str, Any] = []
for _ in range(UpperCamelCase__ ):
# Create output image
_A : Optional[int] = Image.new("RGB" , (len(cells[0] ), len(UpperCamelCase__ )) )
_A : Optional[Any] = img.load()
# Save cells to image
for x in range(len(UpperCamelCase__ ) ):
for y in range(len(cells[0] ) ):
_A : Dict = 255 - cells[y][x] * 255
_A : List[str] = (colour, colour, colour)
# Save image
images.append(UpperCamelCase__ )
_A : List[Any] = new_generation(UpperCamelCase__ )
return images
if __name__ == "__main__":
lowerCAmelCase__ = generate_images(GLIDER, 16)
images[0].save('out.gif', save_all=True, append_images=images[1:])
| 11 |
def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] ):
# "extended trapezoidal rule"
# int(f) = dx/2 * (f1 + 2f2 + ... + fn)
_A : int = (boundary[1] - boundary[0]) / steps
_A : Any = boundary[0]
_A : List[Any] = boundary[1]
_A : str = make_points(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
_A : str = 0.0
y += (h / 2.0) * f(UpperCamelCase__ )
for i in x_i:
# print(i)
y += h * f(UpperCamelCase__ )
y += (h / 2.0) * f(UpperCamelCase__ )
return y
def _UpperCAmelCase (UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any ):
_A : Optional[int] = a + h
while x < (b - h):
yield x
_A : Dict = x + h
def _UpperCAmelCase (UpperCamelCase__ : Optional[int] ): # enter your function here
_A : Any = (x - 0) * (x - 0)
return y
def _UpperCAmelCase ():
_A : Optional[Any] = 0.0 # Lower bound of integration
_A : Optional[int] = 1.0 # Upper bound of integration
_A : List[Any] = 10.0 # define number of steps or resolution
_A : Any = [a, b] # define boundary of integration
_A : Tuple = method_a(UpperCamelCase__ , UpperCamelCase__ )
print(f"y = {y}" )
if __name__ == "__main__":
main()
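# Added sanity check: for f(x) = x^2 on [0, 1] the exact integral is 1/3.
# With 10 trapezoids, h = 0.1 and the rule above evaluates to
# y = 0.05 * (f(0) + f(1)) + 0.1 * (f(0.1) + ... + f(0.9)) = 0.335,
# within the expected O(h^2) error of the true value 0.3333...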
| 11 | 1 |
from collections.abc import Sequence
def _UpperCAmelCase (UpperCamelCase__ : Sequence[float] , UpperCamelCase__ : bool = False ):
if not arr:
return 0
_A : int = 0 if allow_empty_subarrays else float("-inf" )
_A : Optional[int] = 0.0
for num in arr:
_A : Tuple = max(0 if allow_empty_subarrays else num , curr_sum + num )
_A : Dict = max(UpperCamelCase__ , UpperCamelCase__ )
return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
lowerCAmelCase__ = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(f"{max_subarray_sum(nums) = }")
| 11 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
@parameterized.expand([(None,), ("foo.json",)])
def _lowerCamelCase ( self , __lowerCamelCase) -> List[str]:
_A : str = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCamelCase , config_name=__lowerCamelCase)
_A : Tuple = GenerationConfig.from_pretrained(__lowerCamelCase , config_name=__lowerCamelCase)
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , __lowerCamelCase)
self.assertEqual(loaded_config.temperature , 0.7)
self.assertEqual(loaded_config.length_penalty , 1.0)
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]])
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 5_0)
self.assertEqual(loaded_config.max_length , 2_0)
self.assertEqual(loaded_config.max_time , __lowerCamelCase)
def _lowerCamelCase ( self) -> Optional[int]:
_A : Optional[int] = AutoConfig.from_pretrained("gpt2")
_A : int = GenerationConfig.from_model_config(__lowerCamelCase)
_A : List[Any] = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase)
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id)
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id)
def _lowerCamelCase ( self) -> Optional[Any]:
_A : Optional[Any] = GenerationConfig()
_A : List[Any] = {
"max_new_tokens": 1_0_2_4,
"foo": "bar",
}
_A : List[str] = copy.deepcopy(__lowerCamelCase)
_A : int = generation_config.update(**__lowerCamelCase)
# update_kwargs was not modified (no side effects)
self.assertEqual(__lowerCamelCase , __lowerCamelCase)
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_0_2_4)
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(__lowerCamelCase , {"foo": "bar"})
def _lowerCamelCase ( self) -> Any:
_A : int = GenerationConfig()
_A : int = "bar"
with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
generation_config.save_pretrained(__lowerCamelCase)
_A : Any = GenerationConfig.from_pretrained(__lowerCamelCase)
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , "bar")
_A : Optional[Any] = GenerationConfig.from_model_config(__lowerCamelCase)
assert not hasattr(__lowerCamelCase , "foo") # no new kwargs should be initialized if from config
def _lowerCamelCase ( self) -> List[str]:
_A : Union[str, Any] = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0)
self.assertEqual(default_config.do_sample , __lowerCamelCase)
self.assertEqual(default_config.num_beams , 1)
_A : Optional[int] = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7)
self.assertEqual(config.do_sample , __lowerCamelCase)
self.assertEqual(config.num_beams , 1)
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCamelCase)
_A : Optional[int] = GenerationConfig.from_pretrained(__lowerCamelCase , temperature=1.0)
self.assertEqual(loaded_config.temperature , 1.0)
self.assertEqual(loaded_config.do_sample , __lowerCamelCase)
self.assertEqual(loaded_config.num_beams , 1) # default value
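        # Added note on the override semantics exercised above: kwargs passed to
        # `GenerationConfig.from_pretrained` take precedence over the serialized
        # values, which is why the saved temperature of 0.7 loads back as 1.0.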
@is_staging_test
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
@classmethod
def _lowerCamelCase ( cls) -> Optional[int]:
_A : Dict = TOKEN
HfFolder.save_token(__lowerCamelCase)
@classmethod
def _lowerCamelCase ( cls) -> List[Any]:
try:
delete_repo(token=cls._token , repo_id="test-generation-config")
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-generation-config-org")
except HTTPError:
pass
def _lowerCamelCase ( self) -> Any:
_A : Optional[int] = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("test-generation-config" , use_auth_token=self._token)
_A : Union[str, Any] = GenerationConfig.from_pretrained(F"{USER}/test-generation-config")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
# Reset repo
delete_repo(token=self._token , repo_id="test-generation-config")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__lowerCamelCase , repo_id="test-generation-config" , push_to_hub=__lowerCamelCase , use_auth_token=self._token)
_A : Optional[Any] = GenerationConfig.from_pretrained(F"{USER}/test-generation-config")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : Union[str, Any] = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("valid_org/test-generation-config-org" , use_auth_token=self._token)
_A : int = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-generation-config-org")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__lowerCamelCase , repo_id="valid_org/test-generation-config-org" , push_to_hub=__lowerCamelCase , use_auth_token=self._token)
_A : Optional[int] = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
| 11 | 1 |
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( a):
'''simple docstring'''
def __init__( self , *__lowerCamelCase , **__lowerCamelCase) -> None:
warnings.warn(
"The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use DeformableDetrImageProcessor instead." , __lowerCamelCase , )
super().__init__(*__lowerCamelCase , **__lowerCamelCase)
| 11 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=0.2 , __lowerCamelCase=0.2) -> str:
_A : Optional[int] = bp_numa
_A : Dict = bp_numa
_A : Tuple = bp_numa
_A : List[str] = conva_get[:2]
_A : Tuple = conva_get[2]
_A : Optional[int] = size_pa
_A : Optional[Any] = rate_w
_A : Optional[Any] = rate_t
_A : Union[str, Any] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0]) + 0.5)
for i in range(self.conva[1])
]
_A : int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
_A : Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
_A : Any = -2 * np.random.rand(self.conva[1]) + 1
_A : Optional[int] = -2 * np.random.rand(self.num_bpa) + 1
_A : Optional[Any] = -2 * np.random.rand(self.num_bpa) + 1
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
# save model dict with pickle
_A : Dict = {
"num_bp1": self.num_bpa,
"num_bp2": self.num_bpa,
"num_bp3": self.num_bpa,
"conv1": self.conva,
"step_conv1": self.step_conva,
"size_pooling1": self.size_poolinga,
"rate_weight": self.rate_weight,
"rate_thre": self.rate_thre,
"w_conv1": self.w_conva,
"wkj": self.wkj,
"vji": self.vji,
"thre_conv1": self.thre_conva,
"thre_bp2": self.thre_bpa,
"thre_bp3": self.thre_bpa,
}
with open(__lowerCamelCase , "wb") as f:
pickle.dump(__lowerCamelCase , __lowerCamelCase)
print(F"Model saved: {save_path}")
@classmethod
def _lowerCamelCase ( cls , __lowerCamelCase) -> Any:
# read saved model
with open(__lowerCamelCase , "rb") as f:
_A : Any = pickle.load(__lowerCamelCase) # noqa: S301
_A : Optional[int] = model_dic.get("conv1")
conv_get.append(model_dic.get("step_conv1"))
_A : str = model_dic.get("size_pooling1")
_A : List[str] = model_dic.get("num_bp1")
_A : Union[str, Any] = model_dic.get("num_bp2")
_A : List[Any] = model_dic.get("num_bp3")
_A : Dict = model_dic.get("rate_weight")
_A : List[Any] = model_dic.get("rate_thre")
# create model instance
_A : str = CNN(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
# modify model parameter
_A : List[Any] = model_dic.get("w_conv1")
_A : Union[str, Any] = model_dic.get("wkj")
_A : str = model_dic.get("vji")
_A : List[str] = model_dic.get("thre_conv1")
_A : Optional[Any] = model_dic.get("thre_bp2")
_A : Dict = model_dic.get("thre_bp3")
return conv_ins
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
return 1 / (1 + np.exp(-1 * x))
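        # Added note: the backward pass below relies on the sigmoid identity
        # sig'(x) = sig(x) * (1 - sig(x)), which is why gradients are built from
        # np.multiply(out, 1 - out) terms.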
def _lowerCamelCase ( self , __lowerCamelCase) -> Union[str, Any]:
return round(__lowerCamelCase , 3)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Union[str, Any]:
# convolution process
_A : Tuple = convs[0]
_A : Union[str, Any] = convs[1]
_A : List[Any] = np.shape(__lowerCamelCase)[0]
# get the data slice of original image data, data_focus
_A : Tuple = []
for i_focus in range(0 , size_data - size_conv + 1 , __lowerCamelCase):
for j_focus in range(0 , size_data - size_conv + 1 , __lowerCamelCase):
_A : Optional[int] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(__lowerCamelCase)
# calculate the feature map of every single kernel, and saved as list of matrix
_A : Optional[Any] = []
_A : Optional[int] = int((size_data - size_conv) / conv_step + 1)
for i_map in range(__lowerCamelCase):
_A : Optional[int] = []
for i_focus in range(len(__lowerCamelCase)):
_A : Any = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map]))
- thre_convs[i_map]
)
featuremap.append(self.sig(__lowerCamelCase))
_A : Optional[Any] = np.asmatrix(__lowerCamelCase).reshape(
__lowerCamelCase , __lowerCamelCase)
data_featuremap.append(__lowerCamelCase)
        # expand the data slice to one dimension
_A : Optional[Any] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(__lowerCamelCase))
_A : Dict = np.asarray(__lowerCamelCase)
return focus_list, data_featuremap
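        # Added worked example: a 28x28 input convolved with a 5x5 kernel at
        # stride 1 yields int((28 - 5) / 1 + 1) = 24, i.e. a 24x24 feature map
        # per kernel.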
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase="average_pool") -> Dict:
# pooling process
_A : Optional[Any] = len(featuremaps[0])
_A : str = int(size_map / size_pooling)
_A : Optional[int] = []
for i_map in range(len(__lowerCamelCase)):
_A : int = featuremaps[i_map]
_A : Optional[int] = []
for i_focus in range(0 , __lowerCamelCase , __lowerCamelCase):
for j_focus in range(0 , __lowerCamelCase , __lowerCamelCase):
_A : str = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(__lowerCamelCase))
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(__lowerCamelCase))
_A : Tuple = np.asmatrix(__lowerCamelCase).reshape(__lowerCamelCase , __lowerCamelCase)
featuremap_pooled.append(__lowerCamelCase)
return featuremap_pooled
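        # Added worked example: average-pooling a 24x24 feature map with
        # size_pooling=4 produces a 6x6 map (24 / 4 = 6).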
def _lowerCamelCase ( self , __lowerCamelCase) -> Tuple:
        # expand three-dimensional data into a one-dimensional list
_A : Tuple = []
for i in range(len(__lowerCamelCase)):
_A : Union[str, Any] = np.shape(data[i])
_A : List[Any] = data[i].reshape(1 , shapes[0] * shapes[1])
_A : Optional[Any] = data_listed.getA().tolist()[0]
data_expanded.extend(__lowerCamelCase)
_A : Optional[Any] = np.asarray(__lowerCamelCase)
return data_expanded
def _lowerCamelCase ( self , __lowerCamelCase) -> Union[str, Any]:
        # expand a matrix into a one-dimensional list
_A : List[Any] = np.asarray(__lowerCamelCase)
_A : Union[str, Any] = np.shape(__lowerCamelCase)
_A : Dict = data_mat.reshape(1 , shapes[0] * shapes[1])
return data_expanded
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Optional[int]:
_A : Dict = []
_A : Any = 0
for i_map in range(__lowerCamelCase):
_A : Union[str, Any] = np.ones((size_map, size_map))
for i in range(0 , __lowerCamelCase , __lowerCamelCase):
for j in range(0 , __lowerCamelCase , __lowerCamelCase):
_A : List[Any] = pd_pool[
i_pool
]
_A : Tuple = i_pool + 1
_A : Optional[Any] = np.multiply(
__lowerCamelCase , np.multiply(out_map[i_map] , (1 - out_map[i_map])))
pd_all.append(__lowerCamelCase)
return pd_all
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=bool) -> Union[str, Any]:
        # model training
print("----------------------Start Training-------------------------")
print((" - - Shape: Train_Data ", np.shape(__lowerCamelCase)))
print((" - - Shape: Teach_Data ", np.shape(__lowerCamelCase)))
_A : Tuple = 0
_A : Dict = []
_A : Optional[Any] = 1_0_0_0_0
while rp < n_repeat and mse >= error_accuracy:
_A : Union[str, Any] = 0
print(F"-------------Learning Time {rp}--------------")
for p in range(len(__lowerCamelCase)):
# print('------------Learning Image: %d--------------'%p)
_A : str = np.asmatrix(datas_train[p])
_A : Union[str, Any] = np.asarray(datas_teach[p])
_A , _A : Any = self.convolute(
__lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_A : Optional[Any] = self.pooling(__lowerCamelCase , self.size_poolinga)
_A : Optional[int] = np.shape(__lowerCamelCase)
_A : List[str] = self._expand(__lowerCamelCase)
_A : Tuple = data_bp_input
_A : int = np.dot(__lowerCamelCase , self.vji.T) - self.thre_bpa
_A : List[Any] = self.sig(__lowerCamelCase)
_A : Union[str, Any] = np.dot(__lowerCamelCase , self.wkj.T) - self.thre_bpa
_A : List[str] = self.sig(__lowerCamelCase)
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
_A : int = np.multiply(
(data_teach - bp_outa) , np.multiply(__lowerCamelCase , (1 - bp_outa)))
_A : Optional[Any] = np.multiply(
np.dot(__lowerCamelCase , self.wkj) , np.multiply(__lowerCamelCase , (1 - bp_outa)))
_A : Union[str, Any] = np.dot(__lowerCamelCase , self.vji)
_A : Any = pd_i_all / (self.size_poolinga * self.size_poolinga)
_A : Dict = pd_conva_pooled.T.getA().tolist()
_A : Optional[Any] = self._calculate_gradient_from_pool(
__lowerCamelCase , __lowerCamelCase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1]):
_A : int = self._expand_mat(pd_conva_all[k_conv])
_A : Optional[int] = self.rate_weight * np.dot(__lowerCamelCase , __lowerCamelCase)
_A : List[Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]))
_A : Any = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv]) * self.rate_thre
)
# all connected layer
_A : Tuple = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
_A : int = self.vji + pd_j_all.T * bp_outa * self.rate_weight
_A : Tuple = self.thre_bpa - pd_k_all * self.rate_thre
_A : List[str] = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
_A : Optional[int] = np.sum(abs(data_teach - bp_outa))
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
_A : Any = rp + 1
_A : Dict = error_count / patterns
all_mse.append(__lowerCamelCase)
def draw_error():
_A : Optional[int] = [error_accuracy for i in range(int(n_repeat * 1.2))]
plt.plot(__lowerCamelCase , "+-")
plt.plot(__lowerCamelCase , "r--")
plt.xlabel("Learning Times")
plt.ylabel("All_mse")
plt.grid(__lowerCamelCase , alpha=0.5)
plt.show()
print("------------------Training Complished---------------------")
print((" - - Training epoch: ", rp, F" - - Mse: {mse:.6f}"))
if draw_e:
draw_error()
return mse
def _lowerCamelCase ( self , __lowerCamelCase) -> int:
        # model prediction
_A : Union[str, Any] = []
print("-------------------Start Testing-------------------------")
print((" - - Shape: Test_Data ", np.shape(__lowerCamelCase)))
for p in range(len(__lowerCamelCase)):
_A : int = np.asmatrix(datas_test[p])
_A , _A : List[Any] = self.convolute(
__lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_A : str = self.pooling(__lowerCamelCase , self.size_poolinga)
_A : Optional[int] = self._expand(__lowerCamelCase)
_A : List[Any] = data_bp_input
_A : Optional[int] = bp_outa * self.vji.T - self.thre_bpa
_A : int = self.sig(__lowerCamelCase)
_A : int = bp_outa * self.wkj.T - self.thre_bpa
_A : Optional[int] = self.sig(__lowerCamelCase)
produce_out.extend(bp_outa.getA().tolist())
_A : int = [list(map(self.do_round , __lowerCamelCase)) for each in produce_out]
return np.asarray(__lowerCamelCase)
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
        # return the image data after the convolution step so it can be inspected
_A : Optional[int] = np.asmatrix(__lowerCamelCase)
_A , _A : Tuple = self.convolute(
__lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_A : Union[str, Any] = self.pooling(__lowerCamelCase , self.size_poolinga)
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 11 | 1 |
from itertools import product
def _UpperCAmelCase (UpperCamelCase__ : int , UpperCamelCase__ : int ):
_A : Dict = sides_number
_A : Any = max_face_number * dice_number
_A : Optional[int] = [0] * (max_total + 1)
_A : Any = 1
_A : str = range(UpperCamelCase__ , max_face_number + 1 )
for dice_numbers in product(UpperCamelCase__ , repeat=UpperCamelCase__ ):
_A : Tuple = sum(UpperCamelCase__ )
totals_frequencies[total] += 1
return totals_frequencies
def _UpperCAmelCase ():
_A : Any = total_frequency_distribution(
sides_number=4 , dice_number=9 )
_A : Tuple = total_frequency_distribution(
sides_number=6 , dice_number=6 )
_A : Any = 0
_A : int = 9
_A : List[str] = 4 * 9
_A : Dict = 6
for peter_total in range(UpperCamelCase__ , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
_A : Dict = (4**9) * (6**6)
_A : List[str] = peter_wins_count / total_games_number
_A : Dict = round(UpperCamelCase__ , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(f"{solution() = }")
| 11 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
lowerCAmelCase__ = object()
# For specifying empty leaf dict `{}`
lowerCAmelCase__ = object()
def _UpperCAmelCase (UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] ):
_A : str = tuple((re.compile(x + "$" ) for x in qs) )
for i in range(len(UpperCamelCase__ ) - len(UpperCamelCase__ ) + 1 ):
_A : Tuple = [x.match(UpperCamelCase__ ) for x, y in zip(UpperCamelCase__ , ks[i:] )]
if matches and all(UpperCamelCase__ ):
return True
return False
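# Added example for the matcher above (named `_match` in the upstream source):
# the query tuple must match a contiguous window of the key, e.g.
# qs = ("mlp", "c_fc", "kernel") matches
# ks = ("transformer", "h", "0", "mlp", "c_fc", "kernel") at offset 3 -> True.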
def _UpperCAmelCase (UpperCamelCase__ : str ):
def replace(UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int] ):
for rule, replacement in rules:
if _match(UpperCamelCase__ , UpperCamelCase__ ):
return replacement
return val
return replace
def _UpperCAmelCase ():
return [
# embeddings
(("transformer", "wpe", "embedding"), P("mp" , UpperCamelCase__ )),
(("transformer", "wte", "embedding"), P("mp" , UpperCamelCase__ )),
        # attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(UpperCamelCase__ , "mp" )),
(("attention", "out_proj", "kernel"), P("mp" , UpperCamelCase__ )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(UpperCamelCase__ , "mp" )),
(("mlp", "c_fc", "bias"), P("mp" )),
(("mlp", "c_proj", "kernel"), P("mp" , UpperCamelCase__ )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def _UpperCAmelCase (UpperCamelCase__ : List[str] ):
_A : int = _get_partition_rules()
_A : Optional[int] = _replacement_rules(UpperCamelCase__ )
_A : Optional[int] = {k: _unmatched for k in flatten_dict(UpperCamelCase__ )}
_A : List[str] = {k: replace(UpperCamelCase__ , UpperCamelCase__ ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(UpperCamelCase__ ) )
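# Added sketch: applied to a flattened GPT-2 parameter tree, the rules above map
# e.g. ("transformer", "h", "0", "mlp", "c_fc", "kernel") to a PartitionSpec that
# shards the output dimension over the "mp" mesh axis, while layer norms and
# biases stay unpartitioned.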
| 11 | 1 |
import math
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , __lowerCamelCase=0) -> str: # a graph with Node 0,1,...,N-1
_A : Tuple = n
_A : Optional[int] = [
[math.inf for j in range(0 , __lowerCamelCase)] for i in range(0 , __lowerCamelCase)
] # adjacency matrix for weight
_A : List[str] = [
[math.inf for j in range(0 , __lowerCamelCase)] for i in range(0 , __lowerCamelCase)
] # dp[i][j] stores minimum distance from i to j
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> int:
_A : Dict = w
def _lowerCamelCase ( self) -> Union[str, Any]:
for k in range(0 , self.n):
for i in range(0 , self.n):
for j in range(0 , self.n):
_A : Optional[Any] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j])
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase) -> Optional[int]:
return self.dp[u][v]
if __name__ == "__main__":
lowerCAmelCase__ = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
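    # Added note: for this graph the computed shortest distances are
    # show_min(1, 4) == 11 (via 1 -> 3 -> 4) and show_min(0, 3) == 16
    # (via 0 -> 2 -> 3); the calls above return these values without printing.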
| 11 |
def _UpperCAmelCase (UpperCamelCase__ : str , UpperCamelCase__ : bool = False ):
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
_A : Optional[Any] = f"Expected string as input, found {type(UpperCamelCase__ )}"
raise ValueError(UpperCamelCase__ )
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
_A : Union[str, Any] = f"Expected boolean as use_pascal parameter, found {type(UpperCamelCase__ )}"
raise ValueError(UpperCamelCase__ )
_A : int = input_str.split("_" )
_A : str = 0 if use_pascal else 1
_A : str = words[start_index:]
_A : Optional[Any] = [word[0].upper() + word[1:] for word in words_to_capitalize]
_A : Any = "" if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 11 | 1 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def _UpperCAmelCase (UpperCamelCase__ : dict ):
return (data["data"], data["target"])
def _UpperCAmelCase (UpperCamelCase__ : np.ndarray , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : np.ndarray ):
_A : str = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(UpperCamelCase__ , UpperCamelCase__ )
# Predict target for test data
_A : int = xgb.predict(UpperCamelCase__ )
_A : int = predictions.reshape(len(UpperCamelCase__ ) , 1 )
return predictions
def _UpperCAmelCase ():
_A : List[str] = fetch_california_housing()
_A , _A : Tuple = data_handling(UpperCamelCase__ )
_A , _A , _A , _A : Optional[Any] = train_test_split(
UpperCamelCase__ , UpperCamelCase__ , test_size=0.25 , random_state=1 )
_A : List[str] = xgboost(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Error printing
print(f"Mean Absolute Error : {mean_absolute_error(UpperCamelCase__ , UpperCamelCase__ )}" )
print(f"Mean Square Error : {mean_squared_error(UpperCamelCase__ , UpperCamelCase__ )}" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 11 |
from __future__ import annotations
def _UpperCAmelCase (UpperCamelCase__ : list[int] , UpperCamelCase__ : list[int] , UpperCamelCase__ : int ):
_A : Dict = list(range(len(UpperCamelCase__ ) ) )
_A : Any = [v / w for v, w in zip(UpperCamelCase__ , UpperCamelCase__ )]
index.sort(key=lambda UpperCamelCase__ : ratio[i] , reverse=UpperCamelCase__ )
_A : float = 0
_A : list[float] = [0] * len(UpperCamelCase__ )
for i in index:
if weight[i] <= capacity:
_A : Union[str, Any] = 1
max_value += value[i]
capacity -= weight[i]
else:
_A : Optional[Any] = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
| 11 | 1 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
lowerCAmelCase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ["pixel_values"]
def __init__( self , __lowerCamelCase = True , __lowerCamelCase = 1 / 2_5_5 , __lowerCamelCase = True , __lowerCamelCase = 8 , **__lowerCamelCase , ) -> None:
super().__init__(**__lowerCamelCase)
_A : List[str] = do_rescale
_A : Dict = rescale_factor
_A : Any = do_pad
_A : Union[str, Any] = pad_size
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = None , **__lowerCamelCase) -> np.ndarray:
return rescale(__lowerCamelCase , scale=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = None) -> Any:
_A , _A : Optional[int] = get_image_size(__lowerCamelCase)
_A : Dict = (old_height // size + 1) * size - old_height
_A : str = (old_width // size + 1) * size - old_width
return pad(__lowerCamelCase , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=__lowerCamelCase)
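        # Added worked example: with pad size 8, a 21x17 image is padded up to
        # 24x24; note the formula always adds at least one block, so an exact
        # multiple (e.g. 16) is still padded (to 24).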
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = ChannelDimension.FIRST , **__lowerCamelCase , ) -> Union[str, Any]:
_A : int = do_rescale if do_rescale is not None else self.do_rescale
_A : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
_A : Union[str, Any] = do_pad if do_pad is not None else self.do_pad
_A : Dict = pad_size if pad_size is not None else self.pad_size
_A : Optional[int] = make_list_of_images(__lowerCamelCase)
if not valid_images(__lowerCamelCase):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray.")
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True.")
# All transformations expect numpy arrays.
_A : List[Any] = [to_numpy_array(__lowerCamelCase) for image in images]
if do_rescale:
_A : Optional[int] = [self.rescale(image=__lowerCamelCase , scale=__lowerCamelCase) for image in images]
if do_pad:
_A : str = [self.pad(__lowerCamelCase , size=__lowerCamelCase) for image in images]
_A : List[Any] = [to_channel_dimension_format(__lowerCamelCase , __lowerCamelCase) for image in images]
_A : Optional[Any] = {"pixel_values": images}
return BatchFeature(data=__lowerCamelCase , tensor_type=__lowerCamelCase)
| 11 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( a):
'''simple docstring'''
def __init__( self , *__lowerCamelCase , **__lowerCamelCase) -> None:
warnings.warn(
"The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use BeitImageProcessor instead." , __lowerCamelCase , )
super().__init__(*__lowerCamelCase , **__lowerCamelCase)
| 11 | 1 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def _UpperCAmelCase (UpperCamelCase__ : Union[str, Any] ):
_A , _A : Any = image.size
_A , _A : str = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
_A : List[str] = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] )
_A : Any = np.array(UpperCamelCase__ ).astype(np.floataa ) / 2_55.0
_A : Optional[Any] = image[None].transpose(0 , 3 , 1 , 2 )
_A : Union[str, Any] = torch.from_numpy(UpperCamelCase__ )
return 2.0 * image - 1.0
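# Added worked example: a 100x65 PIL image is resized down to 96x64 (floored to
# multiples of 32), giving a tensor of shape (1, 3, 64, 96) with values
# rescaled from [0, 1] to [-1, 1].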
class lowerCAmelCase__ ( a):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> Optional[int]:
super().__init__()
self.register_modules(vqvae=__lowerCamelCase , unet=__lowerCamelCase , scheduler=__lowerCamelCase)
@torch.no_grad()
def __call__( self , __lowerCamelCase = None , __lowerCamelCase = 1 , __lowerCamelCase = 1_0_0 , __lowerCamelCase = 0.0 , __lowerCamelCase = None , __lowerCamelCase = "pil" , __lowerCamelCase = True , ) -> Union[Tuple, ImagePipelineOutput]:
if isinstance(__lowerCamelCase , PIL.Image.Image):
_A : Tuple = 1
elif isinstance(__lowerCamelCase , torch.Tensor):
_A : Union[str, Any] = image.shape[0]
else:
raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(__lowerCamelCase)}")
if isinstance(__lowerCamelCase , PIL.Image.Image):
_A : Union[str, Any] = preprocess(__lowerCamelCase)
_A , _A : Union[str, Any] = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
_A : Optional[Any] = (batch_size, self.unet.config.in_channels // 2, height, width)
_A : str = next(self.unet.parameters()).dtype
_A : Union[str, Any] = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=self.device , dtype=__lowerCamelCase)
_A : List[Any] = image.to(device=self.device , dtype=__lowerCamelCase)
# set timesteps and move to the correct device
self.scheduler.set_timesteps(__lowerCamelCase , device=self.device)
_A : Any = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
_A : List[str] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_A : str = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
_A : Optional[int] = {}
if accepts_eta:
_A : List[Any] = eta
for t in self.progress_bar(__lowerCamelCase):
# concat latents and low resolution image in the channel dimension.
_A : List[Any] = torch.cat([latents, image] , dim=1)
_A : str = self.scheduler.scale_model_input(__lowerCamelCase , __lowerCamelCase)
# predict the noise residual
_A : Any = self.unet(__lowerCamelCase , __lowerCamelCase).sample
# compute the previous noisy sample x_t -> x_t-1
_A : Optional[int] = self.scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase).prev_sample
# decode the image latents with the VQVAE
_A : Union[str, Any] = self.vqvae.decode(__lowerCamelCase).sample
_A : Dict = torch.clamp(__lowerCamelCase , -1.0 , 1.0)
_A : Tuple = image / 2 + 0.5
_A : int = image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
_A : Optional[int] = self.numpy_to_pil(__lowerCamelCase)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowerCamelCase)
| 11 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=3_2 , __lowerCamelCase=2 , __lowerCamelCase=3 , __lowerCamelCase=1_6 , __lowerCamelCase=[1, 2, 1] , __lowerCamelCase=[2, 2, 4] , __lowerCamelCase=2 , __lowerCamelCase=2.0 , __lowerCamelCase=True , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.1 , __lowerCamelCase="gelu" , __lowerCamelCase=False , __lowerCamelCase=True , __lowerCamelCase=0.0_2 , __lowerCamelCase=1e-5 , __lowerCamelCase=True , __lowerCamelCase=None , __lowerCamelCase=True , __lowerCamelCase=1_0 , __lowerCamelCase=8 , __lowerCamelCase=["stage1", "stage2", "stage3"] , __lowerCamelCase=[1, 2, 3] , ) -> Optional[Any]:
_A : int = parent
_A : Optional[Any] = batch_size
_A : str = image_size
_A : Tuple = patch_size
_A : Tuple = num_channels
_A : Optional[int] = embed_dim
_A : Dict = depths
_A : Any = num_heads
_A : Any = window_size
_A : int = mlp_ratio
_A : Any = qkv_bias
_A : Union[str, Any] = hidden_dropout_prob
_A : Optional[Any] = attention_probs_dropout_prob
_A : Dict = drop_path_rate
_A : List[Any] = hidden_act
_A : Any = use_absolute_embeddings
_A : Optional[int] = patch_norm
_A : Tuple = layer_norm_eps
_A : List[str] = initializer_range
_A : Optional[int] = is_training
_A : Optional[Any] = scope
_A : Optional[int] = use_labels
_A : Dict = type_sequence_label_size
_A : str = encoder_stride
_A : Optional[int] = out_features
_A : Optional[int] = out_indices
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_A : Optional[Any] = None
if self.use_labels:
_A : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_A : Optional[int] = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self) -> Union[str, Any]:
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> List[Any]:
_A : Dict = MaskFormerSwinModel(config=__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
_A : int = model(__lowerCamelCase)
_A : Any = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
_A : List[str] = int(config.embed_dim * 2 ** (len(config.depths) - 1))
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim))
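        # Added worked numbers for the tester defaults above: image_size=32 and
        # patch_size=2 give (32 // 2) ** 2 = 256 patches; len(depths) == 3
        # shrinks the sequence by 4 ** 2 = 16 to 16 tokens, and embed_dim=16
        # grows to 16 * 2 ** 2 = 64 channels.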
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Dict:
_A : Optional[Any] = MaskFormerSwinBackbone(config=__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
_A : Dict = model(__lowerCamelCase)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [1_3, 1_6, 1_6, 1_6])
# verify channels
self.parent.assertEqual(len(model.channels) , len(config.out_features))
self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4])
# verify ValueError
with self.parent.assertRaises(__lowerCamelCase):
_A : Union[str, Any] = ["stem"]
_A : Union[str, Any] = MaskFormerSwinBackbone(config=__lowerCamelCase)
def _lowerCamelCase ( self) -> Dict:
_A : Any = self.prepare_config_and_inputs()
_A , _A , _A : List[Any] = config_and_inputs
_A : Optional[int] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( a , a , unittest.TestCase):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def _lowerCamelCase ( self) -> str:
_A : Union[str, Any] = MaskFormerSwinModelTester(self)
_A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , embed_dim=3_7)
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
" `nn.DataParallel`"
))
def _lowerCamelCase ( self) -> Union[str, Any]:
pass
def _lowerCamelCase ( self) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self) -> str:
return
def _lowerCamelCase ( self) -> List[Any]:
_A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase)
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__lowerCamelCase)
@unittest.skip("Swin does not use inputs_embeds")
def _lowerCamelCase ( self) -> str:
pass
@unittest.skip("Swin does not support feedforward chunking")
def _lowerCamelCase ( self) -> List[Any]:
pass
def _lowerCamelCase ( self) -> Optional[int]:
_A , _A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Union[str, Any] = model_class(__lowerCamelCase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
_A : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear))
def _lowerCamelCase ( self) -> Any:
_A , _A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : int = model_class(__lowerCamelCase)
_A : Optional[int] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : int = [*signature.parameters.keys()]
_A : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase)
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
def _lowerCamelCase ( self) -> Tuple:
pass
@unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
def _lowerCamelCase ( self) -> str:
pass
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Optional[int]:
_A : Any = model_class(__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
with torch.no_grad():
_A : str = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase))
_A : Tuple = outputs.hidden_states
_A : Any = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths) + 1)
self.assertEqual(len(__lowerCamelCase) , __lowerCamelCase)
# Swin has a different seq_length
_A : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
_A : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
def _lowerCamelCase ( self) -> Dict:
_A , _A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_A : Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_A : List[Any] = True
self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A : Optional[int] = True
self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
def _lowerCamelCase ( self) -> Tuple:
_A , _A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_A : Optional[int] = 3
_A : Dict = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
_A : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
_A : int = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_A : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_A : List[Any] = True
self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , (padded_height, padded_width))
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A : Union[str, Any] = True
self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , (padded_height, padded_width))
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
def _lowerCamelCase ( self) -> List[str]:
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
def _lowerCamelCase ( self) -> List[str]:
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
def _lowerCamelCase ( self) -> str:
pass
def _lowerCamelCase ( self) -> Optional[Any]:
_A , _A : Any = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(__lowerCamelCase):
_A : Optional[int] = 0
return t
def check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase={}):
with torch.no_grad():
_A : Any = model(**__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase)
_A : int = model(**__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase).to_tuple()
def recursive_check(__lowerCamelCase , __lowerCamelCase):
if isinstance(__lowerCamelCase , (List, Tuple)):
for tuple_iterable_value, dict_iterable_value in zip(__lowerCamelCase , __lowerCamelCase):
recursive_check(__lowerCamelCase , __lowerCamelCase)
elif isinstance(__lowerCamelCase , __lowerCamelCase):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values()):
recursive_check(__lowerCamelCase , __lowerCamelCase)
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(__lowerCamelCase) , set_nan_tensor_to_zero(__lowerCamelCase) , atol=1e-5) , msg=(
"Tuple and dict output are not equal. Difference:"
F" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
F" {torch.isnan(__lowerCamelCase).any()} and `inf`: {torch.isinf(__lowerCamelCase)}. Dict has"
F" `nan`: {torch.isnan(__lowerCamelCase).any()} and `inf`: {torch.isinf(__lowerCamelCase)}."
) , )
recursive_check(__lowerCamelCase , __lowerCamelCase)
for model_class in self.all_model_classes:
_A : List[Any] = model_class(__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
_A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)
_A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
_A : Any = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase)
_A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase)
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
_A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)
_A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True})
_A : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase)
_A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase)
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True})
@require_torch
class lowerCAmelCase__ ( unittest.TestCase , a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (MaskFormerSwinBackbone,) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE = MaskFormerSwinConfig
def _lowerCamelCase ( self) -> Optional[Any]:
_A : Tuple = MaskFormerSwinModelTester(self)
def _lowerCamelCase ( self) -> Optional[Any]:
_A , _A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_A : Union[str, Any] = inputs_dict["pixel_values"].shape[0]
for backbone_class in self.all_model_classes:
_A : Optional[Any] = backbone_class(__lowerCamelCase)
backbone.to(__lowerCamelCase)
backbone.eval()
_A : List[Any] = backbone(**__lowerCamelCase)
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , __lowerCamelCase)
self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels))
self.assertIsNone(outputs.hidden_states)
self.assertIsNone(outputs.attentions)
# Test output_hidden_states=True
_A : List[str] = backbone(**__lowerCamelCase , output_hidden_states=__lowerCamelCase)
self.assertIsNotNone(outputs.hidden_states)
            self.assertEqual(len(outputs.hidden_states) , len(backbone.stage_names))
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
_A , _A , _A : List[str] = hidden_state.shape
                    self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels))
# Test output_attentions=True
if self.has_attentions:
_A : int = backbone(**__lowerCamelCase , output_attentions=__lowerCamelCase)
self.assertIsNotNone(outputs.attentions)
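

# A minimal, standalone sketch of the tuple-vs-dict output comparison performed
# by `check_equivalence` above: recursively walk nested containers and compare
# leaf tensors with `allclose` after neutralising NaNs. All names here are
# illustrative and independent of the test class.
if __name__ == "__main__":
    import torch

    def outputs_match_sketch(left, right, atol=1e-5):
        if isinstance(left, (list, tuple)):
            return all(outputs_match_sketch(l, r, atol) for l, r in zip(left, right))
        if isinstance(left, dict):
            return outputs_match_sketch(tuple(left.values()), tuple(right.values()), atol)
        if left is None:
            return right is None
        return torch.allclose(torch.nan_to_num(left), torch.nan_to_num(right), atol=atol)

    t = torch.randn(2, 3)
    assert outputs_match_sketch((t, None), tuple({"logits": t, "loss": None}.values()))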
| 11 | 1 |
from __future__ import annotations
def _UpperCAmelCase (UpperCamelCase__ : list ):
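    """
    Return the arithmetic mean of a non-empty list of numbers.

    >>> _UpperCAmelCase([3, 6, 9])
    6.0
    >>> _UpperCAmelCase([])
    Traceback (most recent call last):
        ...
    ValueError: List is empty
    """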
    if not UpperCamelCase__:
raise ValueError("List is empty" )
return sum(UpperCamelCase__ ) / len(UpperCamelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 11 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_speecht5'] = ['SpeechT5Tokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_speecht5'] = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 11 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 11 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def _UpperCAmelCase (UpperCamelCase__ : Union[str, Any] ):
_A , _A : Any = image.size
_A , _A : str = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
_A : List[str] = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] )
_A : Any = np.array(UpperCamelCase__ ).astype(np.floataa ) / 2_55.0
_A : Optional[Any] = image[None].transpose(0 , 3 , 1 , 2 )
_A : Union[str, Any] = torch.from_numpy(UpperCamelCase__ )
return 2.0 * image - 1.0
class lowerCAmelCase__ ( a):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> Optional[int]:
super().__init__()
self.register_modules(vqvae=__lowerCamelCase , unet=__lowerCamelCase , scheduler=__lowerCamelCase)
@torch.no_grad()
def __call__( self , __lowerCamelCase = None , __lowerCamelCase = 1 , __lowerCamelCase = 1_0_0 , __lowerCamelCase = 0.0 , __lowerCamelCase = None , __lowerCamelCase = "pil" , __lowerCamelCase = True , ) -> Union[Tuple, ImagePipelineOutput]:
if isinstance(__lowerCamelCase , PIL.Image.Image):
_A : Tuple = 1
elif isinstance(__lowerCamelCase , torch.Tensor):
_A : Union[str, Any] = image.shape[0]
else:
raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(__lowerCamelCase)}")
if isinstance(__lowerCamelCase , PIL.Image.Image):
_A : Union[str, Any] = preprocess(__lowerCamelCase)
_A , _A : Union[str, Any] = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
_A : Optional[Any] = (batch_size, self.unet.config.in_channels // 2, height, width)
_A : str = next(self.unet.parameters()).dtype
_A : Union[str, Any] = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=self.device , dtype=__lowerCamelCase)
_A : List[Any] = image.to(device=self.device , dtype=__lowerCamelCase)
# set timesteps and move to the correct device
self.scheduler.set_timesteps(__lowerCamelCase , device=self.device)
_A : Any = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
_A : List[str] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_A : str = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
_A : Optional[int] = {}
if accepts_eta:
_A : List[Any] = eta
for t in self.progress_bar(__lowerCamelCase):
# concat latents and low resolution image in the channel dimension.
_A : List[Any] = torch.cat([latents, image] , dim=1)
_A : str = self.scheduler.scale_model_input(__lowerCamelCase , __lowerCamelCase)
# predict the noise residual
_A : Any = self.unet(__lowerCamelCase , __lowerCamelCase).sample
# compute the previous noisy sample x_t -> x_t-1
_A : Optional[int] = self.scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase).prev_sample
# decode the image latents with the VQVAE
_A : Union[str, Any] = self.vqvae.decode(__lowerCamelCase).sample
_A : Dict = torch.clamp(__lowerCamelCase , -1.0 , 1.0)
_A : Tuple = image / 2 + 0.5
_A : int = image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
_A : Optional[int] = self.numpy_to_pil(__lowerCamelCase)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowerCamelCase)
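

# Standalone numeric sketch of the preprocessing contract used by the pipeline
# above: spatial dims are floored to a multiple of 32 and pixel values in
# [0, 255] are rescaled to [-1.0, 1.0] in NCHW layout. A crop stands in for the
# Lanczos resize here; everything below is illustrative only.
if __name__ == "__main__":
    import numpy as np

    arr = np.full((70, 45, 3), 255, dtype=np.uint8)  # (height, width, channels)
    h, w = (x - x % 32 for x in arr.shape[:2])       # 70 -> 64, 45 -> 32
    scaled = arr[:h, :w].astype(np.float32) / 255.0 * 2.0 - 1.0
    chw = scaled.transpose(2, 0, 1)[None]            # add batch dim, NCHW
    assert chw.shape == (1, 3, 64, 32) and chw.max() == 1.0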
| 11 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
def _lowerCamelCase ( self) -> List[str]:
_A : Optional[int] = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0])
_A : List[Any] = get_activation("gelu")
self.assertTrue(torch.allclose(gelu_python(__lowerCamelCase) , torch_builtin(__lowerCamelCase)))
self.assertFalse(torch.allclose(gelu_python(__lowerCamelCase) , gelu_new(__lowerCamelCase)))
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : Optional[int] = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0])
_A : int = get_activation("gelu")
_A : Optional[int] = get_activation("gelu_10")
_A : Dict = torch_builtin(__lowerCamelCase)
_A : int = geluaa(__lowerCamelCase)
_A : Dict = torch.where(y_gelu_aa < 1_0.0 , 1 , 0)
self.assertTrue(torch.max(__lowerCamelCase).item() == 1_0.0)
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask))
def _lowerCamelCase ( self) -> Tuple:
get_activation("gelu")
get_activation("gelu_10")
get_activation("gelu_fast")
get_activation("gelu_new")
get_activation("gelu_python")
get_activation("gelu_pytorch_tanh")
get_activation("linear")
get_activation("mish")
get_activation("quick_gelu")
get_activation("relu")
get_activation("sigmoid")
get_activation("silu")
get_activation("swish")
get_activation("tanh")
with self.assertRaises(__lowerCamelCase):
get_activation("bogus")
with self.assertRaises(__lowerCamelCase):
get_activation(__lowerCamelCase)
def _lowerCamelCase ( self) -> Optional[Any]:
_A : List[Any] = get_activation("gelu")
_A : Optional[Any] = 1
_A : Union[str, Any] = get_activation("gelu")
self.assertEqual(acta.a , 1)
with self.assertRaises(__lowerCamelCase):
_A : Union[str, Any] = acta.a
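

# For reference, a minimal standalone sketch of the exact (erf-based) GeLU the
# tests above compare against torch's builtin:
#   gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2)))
# This is an illustrative re-implementation, not the library's own code.
if __name__ == "__main__":
    import math

    import torch

    def gelu_sketch(x):
        return 0.5 * x * (1.0 + torch.erf(x / math.sqrt(2.0)))

    x = torch.tensor([-1.0, 0.0, 1.0])
    assert torch.allclose(gelu_sketch(x), torch.nn.functional.gelu(x), atol=1e-6)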
| 11 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( a , a , unittest.TestCase):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VQModel
__SCREAMING_SNAKE_CASE = "sample"
@property
def _lowerCamelCase ( self , __lowerCamelCase=(3_2, 3_2)) -> Optional[Any]:
_A : Optional[int] = 4
_A : Tuple = 3
_A : List[Any] = floats_tensor((batch_size, num_channels) + sizes).to(__lowerCamelCase)
return {"sample": image}
@property
def _lowerCamelCase ( self) -> int:
return (3, 3_2, 3_2)
@property
def _lowerCamelCase ( self) -> List[Any]:
return (3, 3_2, 3_2)
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : List[Any] = {
"block_out_channels": [3_2, 6_4],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 3,
}
_A : int = self.dummy_input
return init_dict, inputs_dict
def _lowerCamelCase ( self) -> Union[str, Any]:
pass
def _lowerCamelCase ( self) -> Any:
pass
def _lowerCamelCase ( self) -> Any:
_A , _A : List[Any] = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=__lowerCamelCase)
self.assertIsNotNone(__lowerCamelCase)
self.assertEqual(len(loading_info["missing_keys"]) , 0)
model.to(__lowerCamelCase)
_A : str = model(**self.dummy_input)
assert image is not None, "Make sure output is not None"
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : Optional[Any] = VQModel.from_pretrained("fusing/vqgan-dummy")
model.to(__lowerCamelCase).eval()
torch.manual_seed(0)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0)
_A : Tuple = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size)
_A : Optional[int] = image.to(__lowerCamelCase)
with torch.no_grad():
_A : List[str] = model(__lowerCamelCase).sample
_A : int = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
_A : Optional[Any] = torch.tensor([-0.0_1_5_3, -0.4_0_4_4, -0.1_8_8_0, -0.5_1_6_1, -0.2_4_1_8, -0.4_0_7_2, -0.1_6_1_2, -0.0_6_3_3, -0.0_1_4_3])
# fmt: on
self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3))
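

# Minimal standalone sketch of the vector-quantisation step at the heart of the
# VQModel tested above: each latent vector is snapped to its nearest codebook
# entry. Illustrative only; this is not the diffusers implementation.
def quantize_sketch(latents, codebook):
    # latents: (n, d), codebook: (k, d) -> (n, d) nearest codebook rows
    distances = torch.cdist(latents, codebook)  # pairwise L2 distances, (n, k)
    return codebook[distances.argmin(dim=1)]


if __name__ == "__main__":
    codes = torch.tensor([[0.0, 0.0], [1.0, 1.0]])
    latents = torch.tensor([[0.1, -0.1], [0.9, 1.2]])
    assert torch.equal(quantize_sketch(latents, codes), codes)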
| 11 | 1 |
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
lowerCAmelCase__ = re.compile(R'^(?P<major>\d+)' R'\.(?P<minor>\d+)' R'\.(?P<patch>\d+)$')
@total_ordering
@dataclass
class lowerCAmelCase__ :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
def _lowerCamelCase ( self) -> Any:
_A , _A , _A : Union[str, Any] = _str_to_version_tuple(self.version_str)
def __repr__( self) -> List[str]:
return F"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"
@property
def _lowerCamelCase ( self) -> Tuple:
return self.major, self.minor, self.patch
def _lowerCamelCase ( self , __lowerCamelCase) -> Any:
if isinstance(__lowerCamelCase , __lowerCamelCase):
return Version(__lowerCamelCase)
elif isinstance(__lowerCamelCase , __lowerCamelCase):
return other
raise TypeError(F"{other} (type {type(__lowerCamelCase)}) cannot be compared to version.")
def __eq__( self , __lowerCamelCase) -> int:
try:
_A : Optional[Any] = self._validate_operand(__lowerCamelCase)
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self , __lowerCamelCase) -> List[Any]:
_A : int = self._validate_operand(__lowerCamelCase)
return self.tuple < other.tuple
def __hash__( self) -> Dict:
return hash(_version_tuple_to_str(self.tuple))
@classmethod
def _lowerCamelCase ( cls , __lowerCamelCase) -> Union[str, Any]:
_A : List[Any] = {f.name for f in dataclasses.fields(cls)}
return cls(**{k: v for k, v in dic.items() if k in field_names})
def _lowerCamelCase ( self) -> str:
return self.version_str
def _UpperCAmelCase (UpperCamelCase__ : Any ):
_A : Any = _VERSION_REG.match(UpperCamelCase__ )
if not res:
raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits." )
return tuple(int(UpperCamelCase__ ) for v in [res.group("major" ), res.group("minor" ), res.group("patch" )] )
def _UpperCAmelCase (UpperCamelCase__ : Optional[int] ):
return ".".join(str(UpperCamelCase__ ) for v in version_tuple )
| 11 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase__ = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
},
}
lowerCAmelCase__ = {
'facebook/mbart-large-en-ro': 10_24,
'facebook/mbart-large-cc25': 10_24,
}
# fmt: off
lowerCAmelCase__ = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
__SCREAMING_SNAKE_CASE = MBartTokenizer
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="<s>" , __lowerCamelCase="</s>" , __lowerCamelCase="</s>" , __lowerCamelCase="<s>" , __lowerCamelCase="<unk>" , __lowerCamelCase="<pad>" , __lowerCamelCase="<mask>" , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , **__lowerCamelCase , ) -> Optional[int]:
# Mask token behave like a normal word, i.e. include the space before it
_A : List[str] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase) else mask_token
super().__init__(
vocab_file=__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , **__lowerCamelCase , )
_A : Union[str, Any] = vocab_file
_A : int = False if not self.vocab_file else True
_A : Optional[int] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens])
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
_A : Union[str, Any] = {
lang_code: self.convert_tokens_to_ids(__lowerCamelCase) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_A : Optional[int] = src_lang if src_lang is not None else "en_XX"
_A : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang)
_A : int = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
@property
def _lowerCamelCase ( self) -> str:
return self._src_lang
@src_lang.setter
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Dict = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]:
_A : List[str] = [self.sep_token_id]
_A : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) -> Dict:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
_A : str = src_lang
_A : Any = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)
_A : Tuple = self.convert_tokens_to_ids(__lowerCamelCase)
_A : Dict = tgt_lang_id
return inputs
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = "en_XX" , __lowerCamelCase = None , __lowerCamelCase = "ro_RO" , **__lowerCamelCase , ) -> BatchEncoding:
_A : Any = src_lang
_A : int = tgt_lang
return super().prepare_seqaseq_batch(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self) -> List[str]:
return self.set_src_lang_special_tokens(self.src_lang)
def _lowerCamelCase ( self) -> List[Any]:
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : int = self.convert_tokens_to_ids(__lowerCamelCase)
_A : int = []
_A : List[str] = [self.eos_token_id, self.cur_lang_code]
_A : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens)
_A : str = self.convert_ids_to_tokens(self.suffix_tokens)
_A : List[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Optional[int] = self.convert_tokens_to_ids(__lowerCamelCase)
_A : List[Any] = []
_A : str = [self.eos_token_id, self.cur_lang_code]
_A : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens)
_A : int = self.convert_ids_to_tokens(self.suffix_tokens)
_A : str = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer.")
if not os.path.isdir(__lowerCamelCase):
logger.error(F"Vocabulary path ({save_directory}) should be a directory.")
return
_A : int = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(__lowerCamelCase):
copyfile(self.vocab_file , __lowerCamelCase)
return (out_vocab_file,)
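

# Standalone sketch of the MBart special-token convention configured above:
# the prefix token list is empty and every sequence ends with [eos, lang_code].
# The token ids below are illustrative placeholders, not values read from a
# real tokenizer.
def with_mbart_suffix(token_ids, eos_id, lang_code_id):
    return list(token_ids) + [eos_id, lang_code_id]


if __name__ == "__main__":
    assert with_mbart_suffix([5, 6, 7], eos_id=2, lang_code_id=250004) == [5, 6, 7, 2, 250004]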
| 11 | 1 |
import doctest
from collections import deque
import numpy as np
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self) -> None:
_A : Tuple = [2, 1, 2, -1]
_A : Dict = [1, 2, 3, 4]
def _lowerCamelCase ( self) -> list[float]:
_A : int = len(self.first_signal)
_A : List[str] = len(self.second_signal)
_A : List[str] = max(__lowerCamelCase , __lowerCamelCase)
# create a zero matrix of max_length x max_length
_A : Dict = [[0] * max_length for i in range(__lowerCamelCase)]
# fills the smaller signal with zeros to make both signals of same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(__lowerCamelCase):
_A : int = deque(self.second_signal)
rotated_signal.rotate(__lowerCamelCase)
for j, item in enumerate(__lowerCamelCase):
matrix[i][j] += item
# multiply the matrix with the first signal
_A : Dict = np.matmul(np.transpose(__lowerCamelCase) , np.transpose(self.first_signal))
# rounding-off to two decimal places
return [round(__lowerCamelCase , 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
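

# Independent cross-check of the circular convolution above via the DFT
# identity circular_conv(x, y) == ifft(fft(x) * fft(y)).real. Illustrative only.
if __name__ == "__main__":
    first = np.array([2, 1, 2, -1], dtype=float)
    second = np.array([1, 2, 3, 4], dtype=float)
    fft_result = np.round(np.fft.ifft(np.fft.fft(first) * np.fft.fft(second)).real, 2)
    assert fft_result.tolist() == [10.0, 10.0, 6.0, 14.0]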
| 11 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'vocab_file': 'vocab.json',
'tokenizer_config_file': 'tokenizer_config.json',
'merges_file': 'merges.txt',
}
lowerCAmelCase__ = {
'vocab_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
),
},
'tokenizer_config_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
),
},
'merges_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
),
},
}
lowerCAmelCase__ = '</w>'
lowerCAmelCase__ = '@@ '
def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] ):
_A : Optional[int] = set()
_A : Optional[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_A : List[Any] = char
return pairs
# Speech2Text2 has no max input length
lowerCAmelCase__ = {'facebook/s2t-wav2vec2-large-en-de': 10_24}
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
def __init__( self , __lowerCamelCase , __lowerCamelCase="<s>" , __lowerCamelCase="<pad>" , __lowerCamelCase="</s>" , __lowerCamelCase="<unk>" , __lowerCamelCase=False , __lowerCamelCase=None , **__lowerCamelCase , ) -> Optional[Any]:
super().__init__(
unk_token=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , pad_token=__lowerCamelCase , do_lower_case=__lowerCamelCase , **__lowerCamelCase , )
_A : Dict = do_lower_case
with open(__lowerCamelCase , encoding="utf-8") as vocab_handle:
_A : Optional[int] = json.load(__lowerCamelCase)
_A : Optional[Any] = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(F"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
_A : Optional[Any] = None
_A : Tuple = None
else:
with open(__lowerCamelCase , encoding="utf-8") as merges_handle:
_A : Optional[int] = merges_handle.read().split("\n")[:-1]
_A : Union[str, Any] = [tuple(merge.split()[:2]) for merge in merges]
_A : Optional[int] = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase))))
_A : List[Any] = {}
@property
def _lowerCamelCase ( self) -> int:
return len(self.decoder)
def _lowerCamelCase ( self) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder)
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
_A : Tuple = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
_A : int = get_pairs(__lowerCamelCase)
if not pairs:
return token
while True:
_A : Any = min(__lowerCamelCase , key=lambda __lowerCamelCase: self.bpe_ranks.get(__lowerCamelCase , float("inf")))
if bigram not in self.bpe_ranks:
break
_A , _A : Optional[int] = bigram
_A : int = []
_A : str = 0
while i < len(__lowerCamelCase):
try:
_A : str = word.index(__lowerCamelCase , __lowerCamelCase)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
_A : str = j
if word[i] == first and i < len(__lowerCamelCase) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
_A : List[str] = tuple(__lowerCamelCase)
_A : List[str] = new_word
if len(__lowerCamelCase) == 1:
break
else:
_A : List[Any] = get_pairs(__lowerCamelCase)
_A : Tuple = " ".join(__lowerCamelCase)
if word == "\n " + BPE_TOKEN_MERGES:
_A : List[str] = "\n" + BPE_TOKEN_MERGES
if word.endswith(__lowerCamelCase):
_A : int = word.replace(__lowerCamelCase , "")
_A : int = word.replace(" " , __lowerCamelCase)
_A : Union[str, Any] = word
return word
def _lowerCamelCase ( self , __lowerCamelCase) -> Optional[Any]:
if self.bpe_ranks is None:
raise ValueError(
"This tokenizer was instantiated without a `merges.txt` file, so"
" that it can only be used for decoding, not for encoding."
"Make sure to provide `merges.txt` file at instantiation to enable "
"encoding.")
if self.do_lower_case:
_A : List[Any] = text.lower()
_A : Optional[int] = text.split()
_A : List[str] = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(__lowerCamelCase).split(" ")))
return split_tokens
def _lowerCamelCase ( self , __lowerCamelCase) -> int:
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token))
def _lowerCamelCase ( self , __lowerCamelCase) -> str:
_A : List[str] = self.decoder.get(__lowerCamelCase , self.unk_token)
return result
def _lowerCamelCase ( self , __lowerCamelCase) -> str:
_A : str = " ".join(__lowerCamelCase)
# make sure @@ tokens are concatenated
_A : int = "".join(string.split(__lowerCamelCase))
return string
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
_A : Any = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
_A : Any = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
with open(__lowerCamelCase , "w" , encoding="utf-8") as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase) + "\n")
_A : Union[str, Any] = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(__lowerCamelCase , "w" , encoding="utf-8") as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase: kv[1]):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!")
_A : Optional[int] = token_index
writer.write(" ".join(__lowerCamelCase) + "\n")
index += 1
return (vocab_file, merges_file)
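

# Standalone illustration of the symbol-pair extraction that drives the BPE
# merge loop above: every adjacent pair of symbols in the word is a merge
# candidate. Illustrative only.
if __name__ == "__main__":
    word = ("l", "o", "w", "</w>")
    pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
    assert pairs == {("l", "o"), ("o", "w"), ("w", "</w>")}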
| 11 | 1 |
lowerCAmelCase__ = [
'Audio',
'Array2D',
'Array3D',
'Array4D',
'Array5D',
'ClassLabel',
'Features',
'Sequence',
'Value',
'Image',
'Translation',
'TranslationVariableLanguages',
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 11 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = "vit_mae"
def __init__( self , __lowerCamelCase=7_6_8 , __lowerCamelCase=1_2 , __lowerCamelCase=1_2 , __lowerCamelCase=3_0_7_2 , __lowerCamelCase="gelu" , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.0_2 , __lowerCamelCase=1e-12 , __lowerCamelCase=2_2_4 , __lowerCamelCase=1_6 , __lowerCamelCase=3 , __lowerCamelCase=True , __lowerCamelCase=1_6 , __lowerCamelCase=5_1_2 , __lowerCamelCase=8 , __lowerCamelCase=2_0_4_8 , __lowerCamelCase=0.7_5 , __lowerCamelCase=False , **__lowerCamelCase , ) -> int:
super().__init__(**__lowerCamelCase)
_A : int = hidden_size
_A : List[str] = num_hidden_layers
_A : List[Any] = num_attention_heads
_A : Optional[Any] = intermediate_size
_A : Optional[int] = hidden_act
_A : List[Any] = hidden_dropout_prob
_A : List[Any] = attention_probs_dropout_prob
_A : Union[str, Any] = initializer_range
_A : str = layer_norm_eps
_A : Any = image_size
_A : int = patch_size
_A : int = num_channels
_A : Dict = qkv_bias
_A : Tuple = decoder_num_attention_heads
_A : Tuple = decoder_hidden_size
_A : List[str] = decoder_num_hidden_layers
_A : Optional[Any] = decoder_intermediate_size
_A : List[str] = mask_ratio
_A : Union[str, Any] = norm_pix_loss
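

# Illustrative arithmetic implied by the defaults above: with 224x224 images,
# 16x16 patches and mask_ratio 0.75, the MAE encoder sees only 49 of 196 patches.
if __name__ == "__main__":
    image_size, patch_size, mask_ratio = 224, 16, 0.75
    num_patches = (image_size // patch_size) ** 2
    num_visible = int(num_patches * (1 - mask_ratio))
    assert (num_patches, num_visible) == (196, 49)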
| 11 | 1 |
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowerCAmelCase__ = 'platform'
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class lowerCAmelCase__ :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = PegasusConfig
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = "gelu"
def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=7 , __lowerCamelCase=True , __lowerCamelCase=False , __lowerCamelCase=9_9 , __lowerCamelCase=3_2 , __lowerCamelCase=5 , __lowerCamelCase=4 , __lowerCamelCase=3_7 , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=2_0 , __lowerCamelCase=2 , __lowerCamelCase=1 , __lowerCamelCase=0 , ) -> Any:
_A : Optional[int] = parent
_A : str = batch_size
_A : Dict = seq_length
_A : Any = is_training
_A : Any = use_labels
_A : Tuple = vocab_size
_A : Any = hidden_size
_A : Tuple = num_hidden_layers
_A : Optional[Any] = num_attention_heads
_A : str = intermediate_size
_A : int = hidden_dropout_prob
_A : List[str] = attention_probs_dropout_prob
_A : Any = max_position_embeddings
_A : Any = eos_token_id
_A : int = pad_token_id
_A : Optional[Any] = bos_token_id
def _lowerCamelCase ( self) -> Any:
_A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size).clip(3 , self.vocab_size)
_A : int = np.expand_dims(np.array([self.eos_token_id] * self.batch_size) , 1)
_A : List[str] = np.concatenate([input_ids, eos_tensor] , axis=1)
_A : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_A : List[str] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_A : Dict = prepare_pegasus_inputs_dict(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
return config, inputs_dict
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> List[Any]:
_A : List[str] = 2_0
_A : Optional[Any] = model_class_name(__lowerCamelCase)
_A : str = model.encode(inputs_dict["input_ids"])
_A , _A : List[Any] = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
_A : Tuple = model.init_cache(decoder_input_ids.shape[0] , __lowerCamelCase , __lowerCamelCase)
_A : List[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4")
_A : List[str] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_A : str = model.decode(
decoder_input_ids[:, :-1] , __lowerCamelCase , decoder_attention_mask=__lowerCamelCase , past_key_values=__lowerCamelCase , decoder_position_ids=__lowerCamelCase , )
_A : List[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4")
_A : Union[str, Any] = model.decode(
decoder_input_ids[:, -1:] , __lowerCamelCase , decoder_attention_mask=__lowerCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__lowerCamelCase , )
_A : List[str] = model.decode(__lowerCamelCase , __lowerCamelCase)
_A : Optional[int] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1e-3 , msg=F"Max diff is {diff}")
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Optional[Any]:
_A : Union[str, Any] = 2_0
_A : Union[str, Any] = model_class_name(__lowerCamelCase)
_A : str = model.encode(inputs_dict["input_ids"])
_A , _A : str = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
_A : Optional[int] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
] , axis=-1 , )
_A : Tuple = model.init_cache(decoder_input_ids.shape[0] , __lowerCamelCase , __lowerCamelCase)
_A : Optional[Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_A : Optional[Any] = model.decode(
decoder_input_ids[:, :-1] , __lowerCamelCase , decoder_attention_mask=__lowerCamelCase , past_key_values=__lowerCamelCase , decoder_position_ids=__lowerCamelCase , )
_A : Union[str, Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4")
_A : List[str] = model.decode(
decoder_input_ids[:, -1:] , __lowerCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__lowerCamelCase , decoder_position_ids=__lowerCamelCase , )
_A : Optional[int] = model.decode(__lowerCamelCase , __lowerCamelCase , decoder_attention_mask=__lowerCamelCase)
_A : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1e-3 , msg=F"Max diff is {diff}")
def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str=None , UpperCamelCase__ : Optional[int]=None , ):
if attention_mask is None:
_A : Tuple = np.not_equal(UpperCamelCase__ , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
_A : List[Any] = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class lowerCAmelCase__ ( a , unittest.TestCase):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
__SCREAMING_SNAKE_CASE = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : List[str] = FlaxPegasusModelTester(self)
_A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase)
def _lowerCamelCase ( self) -> Union[str, Any]:
self.config_tester.run_common_tests()
def _lowerCamelCase ( self) -> str:
_A , _A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
def _lowerCamelCase ( self) -> Dict:
_A , _A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
def _lowerCamelCase ( self) -> Dict:
_A , _A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
_A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)
_A : Union[str, Any] = model_class(__lowerCamelCase)
@jax.jit
def encode_jitted(__lowerCamelCase , __lowerCamelCase=None , **__lowerCamelCase):
return model.encode(input_ids=__lowerCamelCase , attention_mask=__lowerCamelCase)
with self.subTest("JIT Enabled"):
_A : Any = encode_jitted(**__lowerCamelCase).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
_A : Union[str, Any] = encode_jitted(**__lowerCamelCase).to_tuple()
self.assertEqual(len(__lowerCamelCase) , len(__lowerCamelCase))
for jitted_output, output in zip(__lowerCamelCase , __lowerCamelCase):
self.assertEqual(jitted_output.shape , output.shape)
def _lowerCamelCase ( self) -> Dict:
_A , _A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
_A : int = model_class(__lowerCamelCase)
_A : Dict = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"])
_A : Union[str, Any] = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase):
return model.decode(
decoder_input_ids=__lowerCamelCase , decoder_attention_mask=__lowerCamelCase , encoder_outputs=__lowerCamelCase , )
with self.subTest("JIT Enabled"):
_A : List[str] = decode_jitted(**__lowerCamelCase).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
_A : Optional[int] = decode_jitted(**__lowerCamelCase).to_tuple()
self.assertEqual(len(__lowerCamelCase) , len(__lowerCamelCase))
for jitted_output, output in zip(__lowerCamelCase , __lowerCamelCase):
self.assertEqual(jitted_output.shape , output.shape)
@slow
def _lowerCamelCase ( self) -> List[str]:
for model_class_name in self.all_model_classes:
_A : Tuple = model_class_name.from_pretrained("google/pegasus-large" , from_pt=__lowerCamelCase)
_A : Union[str, Any] = np.ones((1, 1))
_A : Optional[int] = model(__lowerCamelCase)
self.assertIsNotNone(__lowerCamelCase)
@slow
def _lowerCamelCase ( self) -> Tuple:
_A : Optional[Any] = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
_A : str = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
_A : Any = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
_A : List[Any] = [
"California's largest electricity provider has turned off power to hundreds of thousands of customers.",
"Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
]
_A : Any = tokenizer(__lowerCamelCase , return_tensors="np" , truncation=__lowerCamelCase , max_length=5_1_2 , padding=__lowerCamelCase)
_A : List[Any] = model.generate(**__lowerCamelCase , num_beams=2).sequences
_A : Optional[Any] = tokenizer.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase)
assert tgt_text == decoded
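

# Standalone check of the padding-mask convention used by the input-dict helper
# above: positions holding the pad token map to 0, all others to 1. Illustrative
# only, numpy-only.
if __name__ == "__main__":
    import numpy as np

    pad_token_id = 0
    ids = np.array([[5, 6, pad_token_id, pad_token_id]])
    mask = np.not_equal(ids, pad_token_id).astype(np.int8)
    assert mask.tolist() == [[1, 1, 0, 0]]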
| 11 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_speech_encoder_decoder'] = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_speech_encoder_decoder'] = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 11 | 1 |
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
set_seed(770)
UpperCAmelCase__ = {
"c_attn": "att_proj",
"c_proj": "out_proj",
"c_fc": "in_proj",
"transformer.": "",
"h.": "layers.",
"ln_1": "layernorm_1",
"ln_2": "layernorm_2",
"ln_f": "layernorm_final",
"wpe": "position_embeds_layer",
"wte": "input_embeds_layer",
}
UpperCAmelCase__ = {
"text_small": {
"repo_id": "suno/bark",
"file_name": "text.pt",
},
"coarse_small": {
"repo_id": "suno/bark",
"file_name": "coarse.pt",
},
"fine_small": {
"repo_id": "suno/bark",
"file_name": "fine.pt",
},
"text": {
"repo_id": "suno/bark",
"file_name": "text_2.pt",
},
"coarse": {
"repo_id": "suno/bark",
"file_name": "coarse_2.pt",
},
"fine": {
"repo_id": "suno/bark",
"file_name": "fine_2.pt",
},
}
UpperCAmelCase__ = os.path.dirname(os.path.abspath(__file__))
UpperCAmelCase__ = os.path.join(os.path.expanduser("~"), ".cache")
UpperCAmelCase__ = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _a ( a :List[str] , a :Union[str, Any]=False ) -> List[Any]:
a = model_type
if use_small:
key += "_small"
return os.path.join(a , REMOTE_MODEL_PATHS[key]['''file_name'''] )
def _a ( a :Any , a :Optional[Any] ) -> Tuple:
os.makedirs(a , exist_ok=a )
hf_hub_download(repo_id=a , filename=a , local_dir=a )
def _a ( a :Union[str, Any] , a :Optional[Any] , a :Optional[Any]=False , a :Optional[Any]="text" ) -> List[str]:
if model_type == "text":
a = BarkSemanticModel
a = BarkSemanticConfig
a = BarkSemanticGenerationConfig
elif model_type == "coarse":
a = BarkCoarseModel
a = BarkCoarseConfig
a = BarkCoarseGenerationConfig
elif model_type == "fine":
a = BarkFineModel
a = BarkFineConfig
a = BarkFineGenerationConfig
else:
raise NotImplementedError()
a = F"""{model_type}_small""" if use_small else model_type
a = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(a ):
logger.info(F"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" )
_download(model_info['''repo_id'''] , model_info['''file_name'''] )
a = torch.load(a , map_location=a )
# this is a hack
a = checkpoint['''model_args''']
if "input_vocab_size" not in model_args:
a = model_args['''vocab_size''']
a = model_args['''vocab_size''']
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
a = model_args.pop('''n_head''' )
a = model_args.pop('''n_embd''' )
a = model_args.pop('''n_layer''' )
a = ConfigClass(**checkpoint['''model_args'''] )
a = ModelClass(config=a )
a = GenerationConfigClass()
a = model_generation_config
a = checkpoint['''model''']
# fixup checkpoint
a = '''_orig_mod.'''
for k, v in list(state_dict.items() ):
if k.startswith(a ):
# replace part of the key with corresponding layer name in HF implementation
a = k[len(a ) :]
for old_layer_name in new_layer_name_dict:
a = new_k.replace(a , new_layer_name_dict[old_layer_name] )
a = state_dict.pop(a )
a = set(state_dict.keys() ) - set(model.state_dict().keys() )
a = {k for k in extra_keys if not k.endswith('''.attn.bias''' )}
a = set(model.state_dict().keys() ) - set(state_dict.keys() )
a = {k for k in missing_keys if not k.endswith('''.attn.bias''' )}
if len(a ) != 0:
raise ValueError(F"""extra keys found: {extra_keys}""" )
if len(a ) != 0:
raise ValueError(F"""missing keys: {missing_keys}""" )
model.load_state_dict(a , strict=a )
a = model.num_parameters(exclude_embeddings=a )
a = checkpoint['''best_val_loss'''].item()
logger.info(F"""model loaded: {round(n_params/1e6 , 1 )}M params, {round(a , 3 )} loss""" )
model.eval()
model.to(a )
del checkpoint, state_dict
return model
def _a ( a :Any , a :List[str]=False , a :Dict="text" ) -> Any:
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
a = '''cpu''' # do conversion on cpu
a = _get_ckpt_path(a , use_small=a )
a = _load_model(a , a , model_type=a , use_small=a )
# load bark initial model
a = _bark_load_model(a , '''cpu''' , model_type=a , use_small=a )
if model_type == "text":
a = bark_model['''model''']
if model.num_parameters(exclude_embeddings=a ) != bark_model.get_num_params():
raise ValueError('''initial and new models don\'t have the same number of parameters''' )
# check if same output as the bark model
a = 5
a = 10
if model_type in ["text", "coarse"]:
a = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
a = bark_model(a )[0]
a = model(a )
# take last logits
a = output_new_model_total.logits[:, [-1], :]
else:
a = 3
a = 8
a = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
a = model(a , a )
a = bark_model(a , a )
a = output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError('''initial and new outputs don\'t have the same shape''' )
if (output_new_model - output_old_model).abs().max().item() > 1e-3:
raise ValueError('''initial and new outputs are not equal''' )
Path(a ).mkdir(exist_ok=a )
model.save_pretrained(a )
def _a ( a :int , a :Optional[Any] , a :Dict , a :List[str] , a :Tuple , a :Optional[int] , ) -> Union[str, Any]:
a = os.path.join(a , a )
a = BarkSemanticConfig.from_pretrained(os.path.join(a , '''config.json''' ) )
a = BarkCoarseConfig.from_pretrained(os.path.join(a , '''config.json''' ) )
a = BarkFineConfig.from_pretrained(os.path.join(a , '''config.json''' ) )
a = EncodecConfig.from_pretrained('''facebook/encodec_24khz''' )
a = BarkSemanticModel.from_pretrained(a )
a = BarkCoarseModel.from_pretrained(a )
a = BarkFineModel.from_pretrained(a )
a = EncodecModel.from_pretrained('''facebook/encodec_24khz''' )
a = BarkConfig.from_sub_model_configs(
a , a , a , a )
a = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
a = BarkModel(a )
a = semantic
a = coarseAcoustic
a = fineAcoustic
a = codec
a = bark_generation_config
Path(a ).mkdir(exist_ok=a )
bark.save_pretrained(a , repo_id=a , push_to_hub=a )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("model_type", type=str, help="text, coarse or fine.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")
UpperCAmelCase__ = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
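

# Quick standalone illustration of the checkpoint-key remapping performed by the
# conversion code above: strip the torch.compile "_orig_mod." prefix, then apply
# the layer-name substitutions. The key below is a hypothetical example.
if __name__ == "__main__":
    mapping = {"transformer.": "", "h.": "layers.", "ln_1": "layernorm_1"}
    key = "_orig_mod.transformer.h.0.ln_1.weight"
    key = key[len("_orig_mod.") :]
    for old, new in mapping.items():
        key = key.replace(old, new)
    assert key == "layers.0.layernorm_1.weight"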
| 0 |
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
lowerCAmelCase__ = float('nan')
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , __lowerCamelCase) -> Optional[Any]:
_A : List[Any] = sys.stdout
_A : str = open(__lowerCamelCase , "a")
def __getattr__( self , __lowerCamelCase) -> List[str]:
return getattr(self.stdout , __lowerCamelCase)
def _lowerCamelCase ( self , __lowerCamelCase) -> str:
self.stdout.write(__lowerCamelCase)
# strip tqdm codes
self.file.write(re.sub(r"^.*\r" , "" , __lowerCamelCase , 0 , re.M))
def get_original_command(max_width=80, full_python_path=False):
    """Return the original command line string that can be replayed nicely, wrapped for `max_width` chars."""
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to max_width lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
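# A sketch of the rewrite performed by get_base_command (the command string is illustrative):
#
#   args.base_cmd in : "run_translation.py --output_dir /tmp/old --max_source_length 512"
#   args.base_cmd out: "run_translation.py  --max_source_length 512 --output_dir <output_dir> --overwrite_output_dir"
#   return value     : [sys.executable] + shlex.split(args.base_cmd)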
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id, cmd, variation_key, variation, longest_variation_len, target_metric_key, report_metric_keys,
    repeat_times, output_dir, verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(results) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-cmd", default=None, type=str, required=True, help="Base cmd")
    parser.add_argument(
        "--variations", default=None, type=str, nargs="+", required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation", default=None, type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key", default=None, type=str, required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys", default="", type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'",
    )
    parser.add_argument(
        "--repeat-times", default=1, type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir", default="output_benchmark", type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose", default=False, action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
main()
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine."
def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser
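# A minimal usage sketch (assumption: invoked through the `accelerate` CLI entry point):
#
#   accelerate config                            # answer the interactive prompts
#   accelerate config --config_file my_cfg.yaml  # save the answers to a custom path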
def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that will dynamically pad the inputs for multiple choice received.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
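# A shape sketch for the collator above (sizes are illustrative): with batch_size=2 and
# num_choices=4, the 8 flattened feature dicts are padded together, then each tensor is
# reshaped with .view(2, 4, -1), and batch["labels"] becomes a (2,)-shaped int64 tensor.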
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer

    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
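    # A data sketch of preprocess_function above (values are illustrative): for one SWAG row,
    # sent1 is repeated 4 times and paired with "sent2 + ending0..3"; after tokenizing the
    # 4*N flattened pairs, every key is regrouped into N lists of 4, one list per example.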
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

SPIECE_UNDERLINE = "▁"
class AlbertTokenizer(PreTrainedTokenizer):
    """Construct an ALBERT tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents,
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs
    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == "," and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
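    # An illustrative sketch of the special case above: a piece like "▁9," (a digit followed
    # by a trailing comma) is re-encoded so that it ends up as the separate pieces "▁9" and ",".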
    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequences by adding special tokens.
        An ALBERT sequence has the following format:

        - single sequence: `[CLS] X [SEP]`
        - pair of sequences: `[CLS] A [SEP] B [SEP]`
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
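# A minimal usage sketch (assumption: "spiece.model" is a locally available SentencePiece file):
#
#   >>> tokenizer = AlbertTokenizer("spiece.model")
#   >>> tokenizer.tokenize("Hello, world!")        # lower-cased SentencePiece pieces by default
#   >>> tokenizer("Hello, world!")["input_ids"]    # ids wrapped as [CLS] ... [SEP]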
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False")) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "pytorch",
"script": "run_ddp.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "tensorflow",
"script": "run_tf_dist.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
},
])
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")
    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script, source_dir=self.env.test_path, role=self.env.role,
            image_uri=self.env.image_uri, base_job_name=job_name, instance_count=instance_count,
            instance_type=self.instance_type, debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions, distribution=distribution, py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    """
    An adapter to assist with logging in multiprocess.

    `log` takes in an additional `main_process_only` kwarg, which dictates whether it should be called on all
    processes or only the main executed one. Default is `main_process_only=True`.
    """

    @staticmethod
    def _should_log(main_process_only):
        "Check if log should be performed"
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        """
        Delegates logger call after checking if we should log.
        """
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)

            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()
def get_logger(name: str, log_level: str = None):
    """
    Returns a `logging.Logger` for `name` that can handle multiprocessing.
    """
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
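# A minimal usage sketch (assumption: an `Accelerator`/`PartialState` is initialized first):
#
#   >>> from accelerate import Accelerator
#   >>> accelerator = Accelerator()
#   >>> logger = get_logger(__name__)
#   >>> logger.info("printed on the main process only")
#   >>> logger.info("printed on every process, rank by rank", main_process_only=False, in_order=True)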
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    r"""
    Constructs an OWL-ViT processor which wraps an image processor and a tokenizer into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
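# A minimal usage sketch (assumption: the checkpoint id and `image` variable are illustrative):
#
#   >>> from transformers import OwlViTProcessor
#   >>> processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   >>> inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
#   >>> sorted(inputs.keys())   # ['attention_mask', 'input_ids', 'pixel_values']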
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.patch_embed.proj.weight', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.proj.bias', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'model.pixel_level_module.encoder.model.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'model.pixel_level_module.encoder.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.layers.{i}.downsample.reduction.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.layers.{i}.downsample.norm.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.layers.{i}.downsample.norm.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') )
# FPN
rename_keys.append(('sem_seg_head.layer_4.weight', 'model.pixel_level_module.decoder.fpn.stem.0.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.weight', 'model.pixel_level_module.decoder.fpn.stem.1.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.bias', 'model.pixel_level_module.decoder.fpn.stem.1.bias') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') )
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') )
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') )
rename_keys.append(('sem_seg_head.mask_features.weight', 'model.pixel_level_module.decoder.mask_projection.weight') )
rename_keys.append(('sem_seg_head.mask_features.bias', 'model.pixel_level_module.decoder.mask_projection.bias') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') )
# cross-attention out projection
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') )
# MLP 1
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') )
# MLP 2
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') )
# layernorm 3 (final layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.weight', 'model.transformer_module.decoder.layernorm.weight') )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.bias', 'model.transformer_module.decoder.layernorm.bias') )
# heads on top
rename_keys.append(('sem_seg_head.predictor.query_embed.weight', 'model.transformer_module.queries_embedder.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.weight', 'model.transformer_module.input_projection.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.bias', 'model.transformer_module.input_projection.bias') )
rename_keys.append(('sem_seg_head.predictor.class_embed.weight', 'class_predictor.weight') )
rename_keys.append(('sem_seg_head.predictor.class_embed.bias', 'class_predictor.bias') )
for i in range(3 ):
rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', f'''mask_embedder.{i}.0.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', f'''mask_embedder.{i}.0.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
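# Illustrative use of `rename_key` (the key pair is taken from the mapping above; the
# tensor value is hypothetical): the old entry is popped and re-inserted under the new
# name, mutating the dict in place.
#
#   sd = {"sem_seg_head.mask_features.bias": some_bias_tensor}
#   rename_key(sd, "sem_seg_head.mask_features.bias",
#              "model.pixel_level_module.decoder.mask_projection.bias")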
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'''backbone.layers.{i}.blocks.{j}.attn.qkv.weight''')
            in_proj_bias = state_dict.pop(f'''backbone.layers.{i}.blocks.{j}.attn.qkv.bias''')
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'''] = in_proj_weight[:dim, :]
            state_dict[f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'''] = in_proj_bias[:dim]
            state_dict[f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'''] = in_proj_weight[dim : dim * 2, :]
            state_dict[f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'''] = in_proj_bias[dim : dim * 2]
            state_dict[f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'''] = in_proj_weight[-dim:, :]
            state_dict[f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'''] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight''')
        in_proj_bias = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight'''] = in_proj_weight[:hidden_size, :]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias'''] = in_proj_bias[:hidden_size]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight'''] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias'''] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight'''] = in_proj_weight[-hidden_size:, :]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias'''] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight''')
        in_proj_bias = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight'''] = in_proj_weight[:hidden_size, :]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias'''] = in_proj_bias[:hidden_size]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight'''] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias'''] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight'''] = in_proj_weight[-hidden_size:, :]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias'''] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    config = get_maskformer_config(model_name)
    # load original state_dict
    with open(checkpoint_path, 'rb') as f:
        data = pickle.load(f)
    state_dict = data['model']
    # for name, param in state_dict.items():
    #     print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)
    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()
    for name, param in model.named_parameters():
        print(name, param.shape)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f'''Unexpected keys: {unexpected_keys}'''
    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if 'ade' in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)
    inputs = image_processor(image, return_tensors='pt')
    outputs = model(**inputs)
    print('Logits:', outputs.class_queries_logits[0, :3, :3])
    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6_353, -4.4_770, -2.6_065], [0.5_081, -4.2_394, -3.5_343], [2.1_909, -5.0_353, -1.9_323]])
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print('Looks ok!')
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model and image processor to {pytorch_dump_folder_path}''')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print('Pushing model and image processor to the hub...')
        model.push_to_hub(f'''nielsr/{model_name}''')
        image_processor.push_to_hub(f'''nielsr/{model_name}''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default="""maskformer-swin-tiny-ade""",
        type=str,
        help="""Name of the MaskFormer model you'd like to convert""",
    )
    parser.add_argument(
        """--checkpoint_path""",
        default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
        type=str,
        help="""Path to the original state dict (.pth file).""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    parser.add_argument(
        """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
    )
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
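# Example invocation (a sketch — the script file name and checkpoint path are
# illustrative; the checkpoint must be a pickle exported by the original MaskFormer repo):
#
#   python convert_maskformer_checkpoint.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
#       --pytorch_dump_folder_path ./maskformer-swin-tiny-ade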
| 4 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
    def check_results_dict_not_empty(self, results) -> None:
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)
def _lowerCamelCase ( self) -> int:
_A : Optional[int] = "sshleifer/tiny-gpt2"
_A : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : List[str] = PyTorchBenchmark(__lowerCamelCase)
_A : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> Dict:
_A : int = "sgugger/tiny-distilbert-classification"
_A : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , only_pretrain_model=__lowerCamelCase , )
_A : Dict = PyTorchBenchmark(__lowerCamelCase)
_A : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> Optional[Any]:
_A : Tuple = "sshleifer/tiny-gpt2"
_A : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , torchscript=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Union[str, Any] = PyTorchBenchmark(__lowerCamelCase)
_A : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision")
def _lowerCamelCase ( self) -> int:
_A : Any = "sshleifer/tiny-gpt2"
_A : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , fpaa=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Any = PyTorchBenchmark(__lowerCamelCase)
_A : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> Any:
_A : Union[str, Any] = "sshleifer/tiny-gpt2"
_A : Any = AutoConfig.from_pretrained(__lowerCamelCase)
# set architectures equal to `None`
_A : Dict = None
_A : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Union[str, Any] = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> int:
_A : List[Any] = "sshleifer/tiny-gpt2"
_A : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Optional[Any] = PyTorchBenchmark(__lowerCamelCase)
_A : int = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
@unittest.skipIf(torch_device == "cpu" , "Can't do half precision")
def _lowerCamelCase ( self) -> Optional[Any]:
_A : Any = "sshleifer/tiny-gpt2"
_A : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=__lowerCamelCase , multi_process=__lowerCamelCase , )
_A : List[Any] = PyTorchBenchmark(__lowerCamelCase)
_A : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def _lowerCamelCase ( self) -> str:
_A : List[str] = "sshleifer/tiny-gpt2"
_A : Union[str, Any] = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Optional[Any] = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> int:
_A : Tuple = "sshleifer/tinier_bart"
_A : Optional[Any] = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Dict = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> str:
_A : List[Any] = "sshleifer/tiny-gpt2"
_A : Optional[Any] = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : List[str] = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def _lowerCamelCase ( self) -> int:
_A : int = "sshleifer/tinier_bart"
_A : str = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Tuple = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def _lowerCamelCase ( self) -> Dict:
_A : List[str] = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
_A : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , save_to_csv=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__lowerCamelCase , "inf_time.csv") , train_memory_csv_file=os.path.join(__lowerCamelCase , "train_mem.csv") , inference_memory_csv_file=os.path.join(__lowerCamelCase , "inf_mem.csv") , train_time_csv_file=os.path.join(__lowerCamelCase , "train_time.csv") , env_info_csv_file=os.path.join(__lowerCamelCase , "env.csv") , multi_process=__lowerCamelCase , )
_A : Tuple = PyTorchBenchmark(__lowerCamelCase)
benchmark.run()
self.assertTrue(Path(os.path.join(__lowerCamelCase , "inf_time.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "train_time.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "inf_mem.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "train_mem.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "env.csv")).exists())
def _lowerCamelCase ( self) -> int:
_A : Dict = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(__lowerCamelCase):
self.assertTrue(hasattr(__lowerCamelCase , "sequential"))
self.assertTrue(hasattr(__lowerCamelCase , "cumulative"))
self.assertTrue(hasattr(__lowerCamelCase , "current"))
self.assertTrue(hasattr(__lowerCamelCase , "total"))
with tempfile.TemporaryDirectory() as tmp_dir:
_A : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__lowerCamelCase , "log.txt") , log_print=__lowerCamelCase , trace_memory_line_by_line=__lowerCamelCase , multi_process=__lowerCamelCase , )
_A : Optional[int] = PyTorchBenchmark(__lowerCamelCase)
_A : Dict = benchmark.run()
_check_summary_is_not_empty(result.inference_summary)
_check_summary_is_not_empty(result.train_summary)
self.assertTrue(Path(os.path.join(__lowerCamelCase , "log.txt")).exists())
| 11 | 0 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    """Shared assertions for a Dataset loaded from a parquet file."""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case ) -> Union[str, Any]:
"""simple docstring"""
_lowercase =tmp_path / '''cache'''
_lowercase ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_lowercase =ParquetDatasetReader(__snake_case , cache_dir=__snake_case , keep_in_memory=__snake_case ).read()
_check_parquet_dataset(__snake_case , __snake_case )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case ) -> Any:
"""simple docstring"""
_lowercase =tmp_path / '''cache'''
_lowercase ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_lowercase =features.copy() if features else default_expected_features
_lowercase =(
Features({feature: Value(__snake_case ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowercase =ParquetDatasetReader(__snake_case , features=__snake_case , cache_dir=__snake_case ).read()
_check_parquet_dataset(__snake_case , __snake_case )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case ) -> List[Any]:
"""simple docstring"""
_lowercase =tmp_path / '''cache'''
_lowercase ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_lowercase =ParquetDatasetReader(__snake_case , cache_dir=__snake_case , split=__snake_case ).read()
_check_parquet_dataset(__snake_case , __snake_case )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case ) -> Union[str, Any]:
"""simple docstring"""
if issubclass(__snake_case , __snake_case ):
_lowercase =parquet_path
elif issubclass(__snake_case , __snake_case ):
_lowercase =[parquet_path]
_lowercase =tmp_path / '''cache'''
_lowercase ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_lowercase =ParquetDatasetReader(__snake_case , cache_dir=__snake_case ).read()
_check_parquet_dataset(__snake_case , __snake_case )
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Shared assertions for a DatasetDict loaded from parquet files."""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case ) -> List[Any]:
"""simple docstring"""
_lowercase =tmp_path / '''cache'''
_lowercase ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_lowercase =ParquetDatasetReader(
{'''train''': parquet_path} , cache_dir=__snake_case , keep_in_memory=__snake_case ).read()
_check_parquet_datasetdict(__snake_case , __snake_case )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case ) -> Tuple:
"""simple docstring"""
_lowercase =tmp_path / '''cache'''
_lowercase ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_lowercase =features.copy() if features else default_expected_features
_lowercase =(
Features({feature: Value(__snake_case ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowercase =ParquetDatasetReader({'''train''': parquet_path} , features=__snake_case , cache_dir=__snake_case ).read()
_check_parquet_datasetdict(__snake_case , __snake_case )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case ) -> Optional[Any]:
"""simple docstring"""
if split:
_lowercase ={split: parquet_path}
else:
_lowercase ='''train'''
_lowercase ={'''train''': parquet_path, '''test''': parquet_path}
_lowercase =tmp_path / '''cache'''
_lowercase ={'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_lowercase =ParquetDatasetReader(__snake_case , cache_dir=__snake_case ).read()
_check_parquet_datasetdict(__snake_case , __snake_case , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def UpperCAmelCase_ ( __snake_case , __snake_case ) -> List[str]:
"""simple docstring"""
_lowercase =ParquetDatasetWriter(__snake_case , tmp_path / '''foo.parquet''' )
assert writer.write() > 0
_lowercase =pq.ParquetFile(tmp_path / '''foo.parquet''' )
_lowercase =pf.read()
assert dataset.data.table == output_table
def UpperCAmelCase_ ( __snake_case , __snake_case ) -> Tuple:
"""simple docstring"""
_lowercase =str(shared_datadir / '''test_image_rgb.jpg''' )
_lowercase ={'''image''': [image_path]}
_lowercase =Features({'''image''': Image()} )
_lowercase =Dataset.from_dict(__snake_case , features=__snake_case )
_lowercase =ParquetDatasetWriter(__snake_case , tmp_path / '''foo.parquet''' )
assert writer.write() > 0
_lowercase =Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) )
assert dataset.features == reloaded_dataset.features
_lowercase =ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ) , streaming=__snake_case ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'''feature, expected''' , [
(Features({'''foo''': Value('''int32''' )} ), None),
(Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
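# Minimal round-trip sketch of the reader/writer pair under test (commented so the test
# module keeps no import-time side effects; the file name and column values are illustrative):
#
#   from datasets import Dataset
#   from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter
#
#   ds = Dataset.from_dict({"col_1": ["a"], "col_2": [1], "col_3": [1.0]})
#   ParquetDatasetWriter(ds, "data.parquet").write()
#   reloaded = ParquetDatasetReader("data.parquet").read()
#   assert reloaded.column_names == ["col_1", "col_2", "col_3"]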
| 5 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase__ = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
),
},
}
lowerCAmelCase__ = {
'facebook/nllb-large-en-ro': 10_24,
'facebook/nllb-200-distilled-600M': 10_24,
}
# fmt: off
lowerCAmelCase__ = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class NllbTokenizerFast(PreTrainedTokenizerFast):
    """Fast NLLB tokenizer, backed by HuggingFace's *tokenizers* library."""
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, legacy_behaviour=False, **kwargs):
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, legacy_behaviour=legacy_behaviour, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens])
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for `generate`."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch(self, src_texts, src_lang: str = "eng_Latn", tgt_texts=None, tgt_lang: str = "fra_Latn", **kwargs, ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)
    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source-language setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )
    def set_tgt_lang_special_tokens(self, tgt_lang) -> None:
        """Reset the special tokens to the target-language setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(tgt_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
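# Usage sketch (commented; assumes the Hub checkpoint referenced above is reachable).
# Setting `src_lang`/`tgt_lang` makes the tokenizer wrap inputs with the right language
# codes, which is all the preparation translation with NLLB needs.
#
#   tokenizer = NllbTokenizerFast.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#   )
#   model_inputs = tokenizer("Hello world", return_tensors="pt")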
| 11 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class __A( unittest.TestCase ):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
        image_processor_map = {
            '''do_resize''': True,
            '''size''': {'''height''': 18, '''width''': 18},
            '''do_normalize''': True,
            '''image_mean''': [0.5, 0.5, 0.5],
            '''image_std''': [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, '''w''', encoding='''utf-8''') as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        """Create a list of PIL images to feed the processor with."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
__a = self.get_tokenizer()
__a = self.get_image_processor()
__a = VisionTextDualEncoderProcessor(tokenizer=_snake_case , image_processor=_snake_case )
processor.save_pretrained(self.tmpdirname )
__a = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__a = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__a = self.get_image_processor(do_normalize=_snake_case , padding_value=1.0 )
__a = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_snake_case , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = VisionTextDualEncoderProcessor(tokenizer=_snake_case , image_processor=_snake_case )
__a = self.prepare_image_inputs()
__a = image_processor(_snake_case , return_tensors='''np''' )
__a = processor(images=_snake_case , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = VisionTextDualEncoderProcessor(tokenizer=_snake_case , image_processor=_snake_case )
__a = '''lower newer'''
__a = processor(text=_snake_case )
__a = tokenizer(_snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def SCREAMING_SNAKE_CASE_ ( self ) -> str:
'''simple docstring'''
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = VisionTextDualEncoderProcessor(tokenizer=_snake_case , image_processor=_snake_case )
__a = '''lower newer'''
__a = self.prepare_image_inputs()
__a = processor(text=_snake_case , images=_snake_case )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with self.assertRaises(_snake_case ):
processor()
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = VisionTextDualEncoderProcessor(tokenizer=_snake_case , image_processor=_snake_case )
__a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__a = processor.batch_decode(_snake_case )
__a = tokenizer.batch_decode(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = VisionTextDualEncoderProcessor(tokenizer=_snake_case , image_processor=_snake_case )
__a = '''lower newer'''
__a = self.prepare_image_inputs()
__a = processor(text=_snake_case , images=_snake_case )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 6 |
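# End-to-end usage sketch of the processor exercised above (commented; the checkpoint
# names are illustrative and loading them requires network access):
#
#   from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor, BertTokenizerFast
#   processor = VisionTextDualEncoderProcessor(
#       tokenizer=BertTokenizerFast.from_pretrained("bert-base-uncased"),
#       image_processor=ViTImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k"),
#   )
#   batch = processor(text="lower newer", images=Image.new("RGB", (224, 224)), return_tensors="pt")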
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)
_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(formatter_cls: type, format_type: Optional[str], aliases: Optional[List[str]] = None):
    """Register a Formatter class under a format type name and optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})" )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type] ):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})" )
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter(unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None):
    """Register the error to raise when the optional backend for `format_type` is missing."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type] ):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
    _torch_error = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
_register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
    _tf_error = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
_register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
    _jax_error = ValueError('JAX needs to be installed to be able to return JAX arrays.')
_register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """If `format_type` is a registered alias, return the canonical format type."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Instantiate the formatter registered under `format_type`, raising if it is unavailable."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}'" )
| 11 | 0 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = ConvBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True, )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self : List[Any],lowercase_ : Optional[Any],lowercase_ : List[str],lowercase_ : str,lowercase_ : Optional[Any],lowercase_ : int,lowercase_ : Union[str, Any],lowercase_ : Dict )-> List[Any]:
'''simple docstring'''
A__ = TFConvBertModel(config=lowercase_ )
A__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
A__ = [input_ids, input_mask]
A__ = model(lowercase_ )
A__ = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self : Union[str, Any],lowercase_ : Optional[Any],lowercase_ : int,lowercase_ : Any,lowercase_ : Any,lowercase_ : Optional[Any],lowercase_ : Tuple,lowercase_ : Optional[Any] )-> Tuple:
'''simple docstring'''
A__ = TFConvBertForMaskedLM(config=lowercase_ )
A__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
A__ = model(lowercase_ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self : List[Any],lowercase_ : List[str],lowercase_ : Tuple,lowercase_ : Optional[int],lowercase_ : Any,lowercase_ : Dict,lowercase_ : str,lowercase_ : str )-> Union[str, Any]:
'''simple docstring'''
A__ = self.num_labels
A__ = TFConvBertForSequenceClassification(config=lowercase_ )
A__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
A__ = model(lowercase_ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def snake_case__ ( self : List[str],lowercase_ : Tuple,lowercase_ : Optional[int],lowercase_ : List[str],lowercase_ : List[Any],lowercase_ : Dict,lowercase_ : List[str],lowercase_ : Dict )-> int:
'''simple docstring'''
A__ = self.num_choices
A__ = TFConvBertForMultipleChoice(config=lowercase_ )
A__ = tf.tile(tf.expand_dims(lowercase_,1 ),(1, self.num_choices, 1) )
A__ = tf.tile(tf.expand_dims(lowercase_,1 ),(1, self.num_choices, 1) )
A__ = tf.tile(tf.expand_dims(lowercase_,1 ),(1, self.num_choices, 1) )
A__ = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
A__ = model(lowercase_ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) )
def snake_case__ ( self : int,lowercase_ : int,lowercase_ : Dict,lowercase_ : List[Any],lowercase_ : Dict,lowercase_ : List[Any],lowercase_ : Optional[Any],lowercase_ : Optional[Any] )-> Dict:
'''simple docstring'''
A__ = self.num_labels
A__ = TFConvBertForTokenClassification(config=lowercase_ )
A__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
A__ = model(lowercase_ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self : str,lowercase_ : Optional[Any],lowercase_ : Optional[Any],lowercase_ : Optional[Any],lowercase_ : List[Any],lowercase_ : Any,lowercase_ : Union[str, Any],lowercase_ : Dict )-> Tuple:
'''simple docstring'''
A__ = TFConvBertForQuestionAnswering(config=lowercase_ )
A__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
A__ = model(lowercase_ )
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def snake_case__ ( self : int )-> Any:
'''simple docstring'''
A__ = self.prepare_config_and_inputs()
(
(
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) ,
) = config_and_inputs
A__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
lowerCamelCase = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCamelCase = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
def snake_case__ ( self : List[Any] )-> Optional[int]:
'''simple docstring'''
A__ = TFConvBertModelTester(self )
A__ = ConfigTester(self,config_class=lowercase_,hidden_size=3_7 )
def snake_case__ ( self : Union[str, Any] )-> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self : Optional[Any] )-> Any:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def snake_case__ ( self : Any )-> int:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase_ )
def snake_case__ ( self : Dict )-> Optional[int]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowercase_ )
def snake_case__ ( self : Tuple )-> Tuple:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase_ )
def snake_case__ ( self : str )-> List[str]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase_ )
def snake_case__ ( self : Union[str, Any] )-> Union[str, Any]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase_ )
@slow
def snake_case__ ( self : str )-> Dict:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = True
A__ = True
if hasattr(lowercase_,'use_cache' ):
A__ = True
A__ = getattr(self.model_tester,'encoder_seq_length',self.model_tester.seq_length )
A__ = getattr(self.model_tester,'key_length',lowercase_ )
for model_class in self.all_model_classes:
A__ = self._prepare_for_class(lowercase_,lowercase_ )
A__ = model_class(lowercase_ )
A__ = len(model(lowercase_ ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowercase_,saved_model=lowercase_ )
A__ = os.path.join(lowercase_,'saved_model','1' )
A__ = tf.keras.models.load_model(lowercase_ )
A__ = model(lowercase_ )
if self.is_encoder_decoder:
A__ = outputs['encoder_hidden_states']
A__ = outputs['encoder_attentions']
else:
A__ = outputs['hidden_states']
A__ = outputs['attentions']
self.assertEqual(len(lowercase_ ),lowercase_ )
A__ = getattr(
self.model_tester,'expected_num_hidden_layers',self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowercase_ ),lowercase_ )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ),[self.model_tester.seq_length, self.model_tester.hidden_size],)
self.assertEqual(len(lowercase_ ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],)
@slow
def snake_case__ ( self : List[Any] )-> Union[str, Any]:
'''simple docstring'''
A__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(lowercase_ )
def snake_case__ ( self : Any )-> str:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = True
A__ = getattr(self.model_tester,'decoder_seq_length',self.model_tester.seq_length )
A__ = getattr(self.model_tester,'encoder_seq_length',self.model_tester.seq_length )
A__ = getattr(self.model_tester,'key_length',lowercase_ )
A__ = getattr(self.model_tester,'key_length',lowercase_ )
def check_decoder_attentions_output(lowercase_ : Union[str, Any] ):
A__ = len(lowercase_ )
self.assertEqual(out_len % 2,0 )
A__ = outputs.decoder_attentions
self.assertEqual(len(lowercase_ ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],)
def check_encoder_attentions_output(lowercase_ : str ):
A__ = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(lowercase_ ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],)
for model_class in self.all_model_classes:
A__ = True
A__ = False
A__ = model_class(lowercase_ )
A__ = model(self._prepare_for_class(lowercase_,lowercase_ ) )
A__ = len(lowercase_ )
self.assertEqual(config.output_hidden_states,lowercase_ )
check_encoder_attentions_output(lowercase_ )
if self.is_encoder_decoder:
A__ = model_class(lowercase_ )
A__ = model(self._prepare_for_class(lowercase_,lowercase_ ) )
self.assertEqual(config.output_hidden_states,lowercase_ )
check_decoder_attentions_output(lowercase_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
A__ = True
A__ = model_class(lowercase_ )
A__ = model(self._prepare_for_class(lowercase_,lowercase_ ) )
self.assertEqual(config.output_hidden_states,lowercase_ )
check_encoder_attentions_output(lowercase_ )
# Check attention is always last and order is fine
A__ = True
A__ = True
A__ = model_class(lowercase_ )
A__ = model(self._prepare_for_class(lowercase_,lowercase_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1),len(lowercase_ ) )
self.assertEqual(model.config.output_hidden_states,lowercase_ )
check_encoder_attentions_output(lowercase_ )
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@slow
def snake_case__ ( self : Optional[Any] )-> str:
'''simple docstring'''
A__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
A__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
A__ = model(lowercase_ )[0]
A__ = [1, 6, 7_6_8]
self.assertEqual(output.shape,lowercase_ )
A__ = tf.constant(
[
[
[-0.03_475_493, -0.4_686_034, -0.30_638_832],
[0.22_637_248, -0.26_988_646, -0.7_423_424],
[0.10_324_868, -0.45_013_508, -0.58_280_784],
]
] )
tf.debugging.assert_near(output[:, :3, :3],lowercase_,atol=1E-4 )
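# Usage sketch mirroring the integration test above (commented out; loading the
# checkpoint requires network access):
#
#   model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
#   hidden_states = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]
#   print(hidden_states.shape)  # expected: (1, 6, 768)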
| 7 |
def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] ):
# "extended trapezoidal rule"
# int(f) = dx/2 * (f1 + 2f2 + ... + fn)
_A : int = (boundary[1] - boundary[0]) / steps
_A : Any = boundary[0]
_A : List[Any] = boundary[1]
_A : str = make_points(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
_A : str = 0.0
y += (h / 2.0) * f(UpperCamelCase__ )
for i in x_i:
# print(i)
y += h * f(UpperCamelCase__ )
y += (h / 2.0) * f(UpperCamelCase__ )
return y
def _UpperCAmelCase (UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any ):
_A : Optional[int] = a + h
while x < (b - h):
yield x
_A : Dict = x + h
def _UpperCAmelCase (UpperCamelCase__ : Optional[int] ): # enter your function here
_A : Any = (x - 0) * (x - 0)
return y
def _UpperCAmelCase ():
_A : Optional[Any] = 0.0 # Lower bound of integration
_A : Optional[int] = 1.0 # Upper bound of integration
_A : List[Any] = 10.0 # define number of steps or resolution
_A : Any = [a, b] # define boundary of integration
_A : Tuple = method_a(UpperCamelCase__ , UpperCamelCase__ )
print(f"y = {y}" )
if __name__ == "__main__":
main()
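# A self-contained check of the composite trapezoidal rule implemented above,
# written with illustrative names; the exact integral of x**2 over [0, 1] is 1/3.
def trapezoid_sketch(f, a, b, steps):
    h = (b - a) / steps
    total = 0.5 * (f(a) + f(b))  # endpoints are weighted by 1/2
    total += sum(f(a + i * h) for i in range(1, steps))  # interior points
    return h * total
assert abs(trapezoid_sketch(lambda x: x * x, 0.0, 1.0, 1000) - 1.0 / 3.0) < 1e-6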
| 11 | 0 |
from statistics import mean
import numpy as np
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = 0
# Number of processes finished
snake_case_ = 0
    # Tracks which processes have finished.
    # 1 means the process has already run to completion; 0 means it has not run yet.
snake_case_ = [0] * no_of_process
# List to include calculation results
snake_case_ = [0] * no_of_process
# Sort by arrival time.
snake_case_ = [burst_time[i] for i in np.argsort(SCREAMING_SNAKE_CASE__ )]
snake_case_ = [process_name[i] for i in np.argsort(SCREAMING_SNAKE_CASE__ )]
arrival_time.sort()
while no_of_process > finished_process_count:
snake_case_ = 0
while finished_process[i] == 1:
i += 1
if current_time < arrival_time[i]:
snake_case_ = arrival_time[i]
snake_case_ = 0
# Index showing the location of the process being performed
snake_case_ = 0
# Saves the current response ratio.
snake_case_ = 0
for i in range(0 , SCREAMING_SNAKE_CASE__ ):
if finished_process[i] == 0 and arrival_time[i] <= current_time:
snake_case_ = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
i
]
if response_ratio < temp:
snake_case_ = temp
snake_case_ = i
# Calculate the turn around time
snake_case_ = current_time + burst_time[loc] - arrival_time[loc]
current_time += burst_time[loc]
# Indicates that the process has been performed.
snake_case_ = 1
# Increase finished_process_count by 1
finished_process_count += 1
return turn_around_time
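# A quick worked check of the HRRN response ratio computed above:
# ratio = (burst + waiting) / burst, where waiting = current_time - arrival.
# The values below are hypothetical.
burst, arrival, current = 3, 2, 8
response_ratio = (burst + (current - arrival)) / burst  # (3 + 6) / 3 = 3.0
assert response_ratio == 3.0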
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = [0] * no_of_process
for i in range(0 , SCREAMING_SNAKE_CASE__ ):
snake_case_ = turn_around_time[i] - burst_time[i]
return waiting_time
if __name__ == "__main__":
lowerCAmelCase_ = 5
lowerCAmelCase_ = ['''A''', '''B''', '''C''', '''D''', '''E''']
lowerCAmelCase_ = [1, 2, 3, 4, 5]
lowerCAmelCase_ = [1, 2, 3, 4, 5]
lowerCAmelCase_ = calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
lowerCAmelCase_ = calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print('''Process name \tArrival time \tBurst time \tTurn around time \tWaiting time''')
for i in range(0, no_of_process):
print(
f"""{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"""
f"""{turn_around_time[i]}\t\t\t{waiting_time[i]}"""
)
print(f"""average waiting time : {mean(waiting_time):.5f}""")
print(f"""average turn around time : {mean(turn_around_time):.5f}""") | 8 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
@parameterized.expand([(None,), ("foo.json",)])
def _lowerCamelCase ( self , __lowerCamelCase) -> List[str]:
_A : str = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCamelCase , config_name=__lowerCamelCase)
_A : Tuple = GenerationConfig.from_pretrained(__lowerCamelCase , config_name=__lowerCamelCase)
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , __lowerCamelCase)
self.assertEqual(loaded_config.temperature , 0.7)
self.assertEqual(loaded_config.length_penalty , 1.0)
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]])
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 5_0)
self.assertEqual(loaded_config.max_length , 2_0)
self.assertEqual(loaded_config.max_time , __lowerCamelCase)
def _lowerCamelCase ( self) -> Optional[int]:
_A : Optional[int] = AutoConfig.from_pretrained("gpt2")
_A : int = GenerationConfig.from_model_config(__lowerCamelCase)
_A : List[Any] = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase)
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id)
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id)
def _lowerCamelCase ( self) -> Optional[Any]:
_A : Optional[Any] = GenerationConfig()
_A : List[Any] = {
"max_new_tokens": 1_0_2_4,
"foo": "bar",
}
_A : List[str] = copy.deepcopy(__lowerCamelCase)
_A : int = generation_config.update(**__lowerCamelCase)
# update_kwargs was not modified (no side effects)
self.assertEqual(__lowerCamelCase , __lowerCamelCase)
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_0_2_4)
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(__lowerCamelCase , {"foo": "bar"})
def _lowerCamelCase ( self) -> Any:
_A : int = GenerationConfig()
_A : int = "bar"
with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
generation_config.save_pretrained(__lowerCamelCase)
_A : Any = GenerationConfig.from_pretrained(__lowerCamelCase)
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , "bar")
_A : Optional[Any] = GenerationConfig.from_model_config(__lowerCamelCase)
assert not hasattr(__lowerCamelCase , "foo") # no new kwargs should be initialized if from config
def _lowerCamelCase ( self) -> List[str]:
_A : Union[str, Any] = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0)
self.assertEqual(default_config.do_sample , __lowerCamelCase)
self.assertEqual(default_config.num_beams , 1)
_A : Optional[int] = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7)
self.assertEqual(config.do_sample , __lowerCamelCase)
self.assertEqual(config.num_beams , 1)
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCamelCase)
_A : Optional[int] = GenerationConfig.from_pretrained(__lowerCamelCase , temperature=1.0)
self.assertEqual(loaded_config.temperature , 1.0)
self.assertEqual(loaded_config.do_sample , __lowerCamelCase)
self.assertEqual(loaded_config.num_beams , 1) # default value
@is_staging_test
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
@classmethod
def _lowerCamelCase ( cls) -> Optional[int]:
_A : Dict = TOKEN
HfFolder.save_token(__lowerCamelCase)
@classmethod
def _lowerCamelCase ( cls) -> List[Any]:
try:
delete_repo(token=cls._token , repo_id="test-generation-config")
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-generation-config-org")
except HTTPError:
pass
def _lowerCamelCase ( self) -> Any:
_A : Optional[int] = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("test-generation-config" , use_auth_token=self._token)
_A : Union[str, Any] = GenerationConfig.from_pretrained(F"{USER}/test-generation-config")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
# Reset repo
delete_repo(token=self._token , repo_id="test-generation-config")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__lowerCamelCase , repo_id="test-generation-config" , push_to_hub=__lowerCamelCase , use_auth_token=self._token)
_A : Optional[Any] = GenerationConfig.from_pretrained(F"{USER}/test-generation-config")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : Union[str, Any] = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("valid_org/test-generation-config-org" , use_auth_token=self._token)
_A : int = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-generation-config-org")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__lowerCamelCase , repo_id="valid_org/test-generation-config-org" , push_to_hub=__lowerCamelCase , use_auth_token=self._token)
_A : Optional[int] = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
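# A minimal local round-trip of the save/load behaviour tested above; the
# parameter values here are arbitrary.
import tempfile
from transformers import GenerationConfig
sketch_config = GenerationConfig(do_sample=True, temperature=0.7)
with tempfile.TemporaryDirectory() as sketch_dir:
    sketch_config.save_pretrained(sketch_dir)
    sketch_reloaded = GenerationConfig.from_pretrained(sketch_dir)
assert sketch_reloaded.temperature == 0.7 and sketch_reloaded.do_sample is True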
| 11 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase : Optional[Any] ='▁'
__lowerCAmelCase : Union[str, Any] ={'vocab_file': 'spiece.model'}
__lowerCAmelCase : Optional[int] ={
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'}
}
__lowerCAmelCase : Optional[int] ={
'google/pegasus-xsum': 5_1_2,
}
__lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__)
class _lowercase ( A__ ):
'''simple docstring'''
    SCREAMING_SNAKE_CASE__ : List[str] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : Any = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : Tuple = ['''input_ids''', '''attention_mask''']
def __init__( self :str , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Dict="<pad>" , lowerCAmelCase__ :List[Any]="</s>" , lowerCAmelCase__ :Tuple="<unk>" , lowerCAmelCase__ :str="<mask_2>" , lowerCAmelCase__ :Dict="<mask_1>" , lowerCAmelCase__ :Union[str, Any]=None , lowerCAmelCase__ :Union[str, Any]=103 , lowerCAmelCase__ :Optional[Dict[str, Any]] = None , **lowerCAmelCase__ :str , ) -> None:
__SCREAMING_SNAKE_CASE : int = offset
if additional_special_tokens is not None:
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise TypeError(
f'''additional_special_tokens should be of type {type(lowerCAmelCase__ )}, but is'''
f''' {type(lowerCAmelCase__ )}''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'''<unk_{i}>''' for i in range(len(lowerCAmelCase__ ) , self.offset - 1 )
]
if len(set(lowerCAmelCase__ ) ) != len(lowerCAmelCase__ ):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
__SCREAMING_SNAKE_CASE : List[str] = additional_special_tokens_extended
else:
__SCREAMING_SNAKE_CASE : List[Any] = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
__SCREAMING_SNAKE_CASE : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token_sent=lowerCAmelCase__ , offset=lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE : Dict = mask_token_sent
__SCREAMING_SNAKE_CASE : Any = vocab_file
__SCREAMING_SNAKE_CASE : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCAmelCase__ )
# add special tokens to encoder dict
__SCREAMING_SNAKE_CASE : Dict[int, str] = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
__SCREAMING_SNAKE_CASE : Dict[str, int] = {v: k for k, v in self.encoder.items()}
@property
def __magic_name__( self :Dict ) -> int:
return len(self.sp_model ) + self.offset
def __magic_name__( self :Dict ) -> Dict[str, int]:
__SCREAMING_SNAKE_CASE : List[str] = {self.convert_ids_to_tokens(lowerCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self :List[str] ) -> Dict:
__SCREAMING_SNAKE_CASE : str = self.__dict__.copy()
__SCREAMING_SNAKE_CASE : Optional[Any] = None
return state
def __setstate__( self :Any , lowerCAmelCase__ :Tuple ) -> Tuple:
__SCREAMING_SNAKE_CASE : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__SCREAMING_SNAKE_CASE : Any = {}
__SCREAMING_SNAKE_CASE : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :str ) -> List[str]:
return self.sp_model.encode(lowerCAmelCase__ , out_type=lowerCAmelCase__ )
def __magic_name__( self :List[Any] , lowerCAmelCase__ :str ) -> int:
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
__SCREAMING_SNAKE_CASE : Optional[Any] = self.sp_model.piece_to_id(lowerCAmelCase__ )
return sp_id + self.offset
def __magic_name__( self :List[Any] , lowerCAmelCase__ :int ) -> str:
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.sp_model.IdToPiece(index - self.offset )
return token
def __magic_name__( self :Tuple , lowerCAmelCase__ :Optional[int] ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Dict = []
__SCREAMING_SNAKE_CASE : Tuple = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowerCAmelCase__ ) + token
__SCREAMING_SNAKE_CASE : int = []
else:
current_sub_tokens.append(lowerCAmelCase__ )
out_string += self.sp_model.decode(lowerCAmelCase__ )
return out_string.strip()
def __magic_name__( self :Any , lowerCAmelCase__ :Optional[Any]=False ) -> Dict:
return 1
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :int ) -> str:
__SCREAMING_SNAKE_CASE : List[Any] = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def __magic_name__( self :int , lowerCAmelCase__ :List , lowerCAmelCase__ :Optional[List] = None , lowerCAmelCase__ :bool = False ) -> List[int]:
if already_has_special_tokens:
return self._special_token_mask(lowerCAmelCase__ )
elif token_ids_a is None:
return self._special_token_mask(lowerCAmelCase__ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def __magic_name__( self :List[str] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :List[Any]=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def __magic_name__( self :int , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(
lowerCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase__ , '''wb''' ) as fi:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase__ )
return (out_vocab_file,)
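# A schematic trace of the id shifting done by the conversion methods above:
# SentencePiece ids are moved up by `offset` so that ids 0..offset-1 stay
# reserved for pad/eos/mask/<unk_x>. The piece id below is a made-up example.
offset = 103
sp_id = 7                   # hypothetical SentencePiece piece id
token_id = sp_id + offset   # token -> id adds the offset
assert token_id - offset == sp_id  # id -> token subtracts it again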
| 9 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=0.2 , __lowerCamelCase=0.2) -> str:
_A : Optional[int] = bp_numa
_A : Dict = bp_numa
_A : Tuple = bp_numa
_A : List[str] = conva_get[:2]
_A : Tuple = conva_get[2]
_A : Optional[int] = size_pa
_A : Optional[Any] = rate_w
_A : Optional[Any] = rate_t
_A : Union[str, Any] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0]) + 0.5)
for i in range(self.conva[1])
]
_A : int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
_A : Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
_A : Any = -2 * np.random.rand(self.conva[1]) + 1
_A : Optional[int] = -2 * np.random.rand(self.num_bpa) + 1
_A : Optional[Any] = -2 * np.random.rand(self.num_bpa) + 1
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
# save model dict with pickle
_A : Dict = {
"num_bp1": self.num_bpa,
"num_bp2": self.num_bpa,
"num_bp3": self.num_bpa,
"conv1": self.conva,
"step_conv1": self.step_conva,
"size_pooling1": self.size_poolinga,
"rate_weight": self.rate_weight,
"rate_thre": self.rate_thre,
"w_conv1": self.w_conva,
"wkj": self.wkj,
"vji": self.vji,
"thre_conv1": self.thre_conva,
"thre_bp2": self.thre_bpa,
"thre_bp3": self.thre_bpa,
}
with open(__lowerCamelCase , "wb") as f:
pickle.dump(__lowerCamelCase , __lowerCamelCase)
print(F"Model saved: {save_path}")
@classmethod
def _lowerCamelCase ( cls , __lowerCamelCase) -> Any:
# read saved model
with open(__lowerCamelCase , "rb") as f:
_A : Any = pickle.load(__lowerCamelCase) # noqa: S301
_A : Optional[int] = model_dic.get("conv1")
conv_get.append(model_dic.get("step_conv1"))
_A : str = model_dic.get("size_pooling1")
_A : List[str] = model_dic.get("num_bp1")
_A : Union[str, Any] = model_dic.get("num_bp2")
_A : List[Any] = model_dic.get("num_bp3")
_A : Dict = model_dic.get("rate_weight")
_A : List[Any] = model_dic.get("rate_thre")
# create model instance
_A : str = CNN(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
        # modify model parameters
_A : List[Any] = model_dic.get("w_conv1")
_A : Union[str, Any] = model_dic.get("wkj")
_A : str = model_dic.get("vji")
_A : List[str] = model_dic.get("thre_conv1")
_A : Optional[Any] = model_dic.get("thre_bp2")
_A : Dict = model_dic.get("thre_bp3")
return conv_ins
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
return 1 / (1 + np.exp(-1 * x))
def _lowerCamelCase ( self , __lowerCamelCase) -> Union[str, Any]:
return round(__lowerCamelCase , 3)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Union[str, Any]:
# convolution process
_A : Tuple = convs[0]
_A : Union[str, Any] = convs[1]
_A : List[Any] = np.shape(__lowerCamelCase)[0]
# get the data slice of original image data, data_focus
_A : Tuple = []
for i_focus in range(0 , size_data - size_conv + 1 , __lowerCamelCase):
for j_focus in range(0 , size_data - size_conv + 1 , __lowerCamelCase):
_A : Optional[int] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(__lowerCamelCase)
        # calculate the feature map of every single kernel and save it as a list of matrices
_A : Optional[Any] = []
_A : Optional[int] = int((size_data - size_conv) / conv_step + 1)
for i_map in range(__lowerCamelCase):
_A : Optional[int] = []
for i_focus in range(len(__lowerCamelCase)):
_A : Any = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map]))
- thre_convs[i_map]
)
featuremap.append(self.sig(__lowerCamelCase))
_A : Optional[Any] = np.asmatrix(__lowerCamelCase).reshape(
__lowerCamelCase , __lowerCamelCase)
data_featuremap.append(__lowerCamelCase)
        # expand the data slices to one dimension
_A : Optional[Any] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(__lowerCamelCase))
_A : Dict = np.asarray(__lowerCamelCase)
return focus_list, data_featuremap
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase="average_pool") -> Dict:
# pooling process
_A : Optional[Any] = len(featuremaps[0])
_A : str = int(size_map / size_pooling)
_A : Optional[int] = []
for i_map in range(len(__lowerCamelCase)):
_A : int = featuremaps[i_map]
_A : Optional[int] = []
for i_focus in range(0 , __lowerCamelCase , __lowerCamelCase):
for j_focus in range(0 , __lowerCamelCase , __lowerCamelCase):
_A : str = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(__lowerCamelCase))
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(__lowerCamelCase))
_A : Tuple = np.asmatrix(__lowerCamelCase).reshape(__lowerCamelCase , __lowerCamelCase)
featuremap_pooled.append(__lowerCamelCase)
return featuremap_pooled
def _lowerCamelCase ( self , __lowerCamelCase) -> Tuple:
        # expand three-dimensional data into a one-dimensional list
_A : Tuple = []
for i in range(len(__lowerCamelCase)):
_A : Union[str, Any] = np.shape(data[i])
_A : List[Any] = data[i].reshape(1 , shapes[0] * shapes[1])
_A : Optional[Any] = data_listed.getA().tolist()[0]
data_expanded.extend(__lowerCamelCase)
_A : Optional[Any] = np.asarray(__lowerCamelCase)
return data_expanded
def _lowerCamelCase ( self , __lowerCamelCase) -> Union[str, Any]:
        # expand a matrix into a one-dimensional list
_A : List[Any] = np.asarray(__lowerCamelCase)
_A : Union[str, Any] = np.shape(__lowerCamelCase)
_A : Dict = data_mat.reshape(1 , shapes[0] * shapes[1])
return data_expanded
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Optional[int]:
_A : Dict = []
_A : Any = 0
for i_map in range(__lowerCamelCase):
_A : Union[str, Any] = np.ones((size_map, size_map))
for i in range(0 , __lowerCamelCase , __lowerCamelCase):
for j in range(0 , __lowerCamelCase , __lowerCamelCase):
_A : List[Any] = pd_pool[
i_pool
]
_A : Tuple = i_pool + 1
_A : Optional[Any] = np.multiply(
__lowerCamelCase , np.multiply(out_map[i_map] , (1 - out_map[i_map])))
pd_all.append(__lowerCamelCase)
return pd_all
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=bool) -> Union[str, Any]:
        # model training
print("----------------------Start Training-------------------------")
print((" - - Shape: Train_Data ", np.shape(__lowerCamelCase)))
print((" - - Shape: Teach_Data ", np.shape(__lowerCamelCase)))
_A : Tuple = 0
_A : Dict = []
_A : Optional[Any] = 1_0_0_0_0
while rp < n_repeat and mse >= error_accuracy:
_A : Union[str, Any] = 0
print(F"-------------Learning Time {rp}--------------")
for p in range(len(__lowerCamelCase)):
# print('------------Learning Image: %d--------------'%p)
_A : str = np.asmatrix(datas_train[p])
_A : Union[str, Any] = np.asarray(datas_teach[p])
_A , _A : Any = self.convolute(
__lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_A : Optional[Any] = self.pooling(__lowerCamelCase , self.size_poolinga)
_A : Optional[int] = np.shape(__lowerCamelCase)
_A : List[str] = self._expand(__lowerCamelCase)
_A : Tuple = data_bp_input
_A : int = np.dot(__lowerCamelCase , self.vji.T) - self.thre_bpa
_A : List[Any] = self.sig(__lowerCamelCase)
_A : Union[str, Any] = np.dot(__lowerCamelCase , self.wkj.T) - self.thre_bpa
_A : List[str] = self.sig(__lowerCamelCase)
                # --------------Model Learning ------------------------
# calculate error and gradient---------------
_A : int = np.multiply(
(data_teach - bp_outa) , np.multiply(__lowerCamelCase , (1 - bp_outa)))
_A : Optional[Any] = np.multiply(
np.dot(__lowerCamelCase , self.wkj) , np.multiply(__lowerCamelCase , (1 - bp_outa)))
_A : Union[str, Any] = np.dot(__lowerCamelCase , self.vji)
_A : Any = pd_i_all / (self.size_poolinga * self.size_poolinga)
_A : Dict = pd_conva_pooled.T.getA().tolist()
_A : Optional[Any] = self._calculate_gradient_from_pool(
__lowerCamelCase , __lowerCamelCase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1]):
_A : int = self._expand_mat(pd_conva_all[k_conv])
_A : Optional[int] = self.rate_weight * np.dot(__lowerCamelCase , __lowerCamelCase)
_A : List[Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]))
_A : Any = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv]) * self.rate_thre
)
                # fully connected layer
_A : Tuple = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
_A : int = self.vji + pd_j_all.T * bp_outa * self.rate_weight
_A : Tuple = self.thre_bpa - pd_k_all * self.rate_thre
_A : List[str] = self.thre_bpa - pd_j_all * self.rate_thre
                # calculate the summed error for this single image
_A : Optional[int] = np.sum(abs(data_teach - bp_outa))
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
_A : Any = rp + 1
_A : Dict = error_count / patterns
all_mse.append(__lowerCamelCase)
def draw_error():
_A : Optional[int] = [error_accuracy for i in range(int(n_repeat * 1.2))]
plt.plot(__lowerCamelCase , "+-")
plt.plot(__lowerCamelCase , "r--")
plt.xlabel("Learning Times")
plt.ylabel("All_mse")
plt.grid(__lowerCamelCase , alpha=0.5)
plt.show()
print("------------------Training Complished---------------------")
print((" - - Training epoch: ", rp, F" - - Mse: {mse:.6f}"))
if draw_e:
draw_error()
return mse
def _lowerCamelCase ( self , __lowerCamelCase) -> int:
# model predict
_A : Union[str, Any] = []
print("-------------------Start Testing-------------------------")
print((" - - Shape: Test_Data ", np.shape(__lowerCamelCase)))
for p in range(len(__lowerCamelCase)):
_A : int = np.asmatrix(datas_test[p])
_A , _A : List[Any] = self.convolute(
__lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_A : str = self.pooling(__lowerCamelCase , self.size_poolinga)
_A : Optional[int] = self._expand(__lowerCamelCase)
_A : List[Any] = data_bp_input
_A : Optional[int] = bp_outa * self.vji.T - self.thre_bpa
_A : int = self.sig(__lowerCamelCase)
_A : int = bp_outa * self.wkj.T - self.thre_bpa
_A : Optional[int] = self.sig(__lowerCamelCase)
produce_out.extend(bp_outa.getA().tolist())
_A : int = [list(map(self.do_round , __lowerCamelCase)) for each in produce_out]
return np.asarray(__lowerCamelCase)
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
        # return the image data after the convolution and pooling steps so they can be inspected
_A : Optional[int] = np.asmatrix(__lowerCamelCase)
_A , _A : Tuple = self.convolute(
__lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_A : Union[str, Any] = self.pooling(__lowerCamelCase , self.size_poolinga)
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
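# A minimal sketch of the per-window convolution step used above: multiply a
# data slice elementwise with a kernel, subtract a threshold, and squash with
# the sigmoid. Shapes and values are hypothetical.
import numpy as np
window = np.random.rand(3, 3)   # one data_focus slice
kernel = np.random.rand(3, 3)   # one entry of w_conv1
threshold = 0.5
activation = 1.0 / (1.0 + np.exp(-(np.sum(window * kernel) - threshold)))
assert 0.0 < activation < 1.0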
| 11 | 0 |
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def lowerCAmelCase_ ( __a , __a , __a ) -> List[str]:
"""simple docstring"""
if gpta_config_file == "":
lowerCamelCase__: Optional[Any] =GPTaConfig()
else:
lowerCamelCase__: Optional[Any] =GPTaConfig.from_json_file(__a )
lowerCamelCase__: Any =GPTaModel(__a )
# Load weights from numpy
load_tf_weights_in_gpta(__a , __a , __a )
# Save pytorch-model
lowerCamelCase__: Any =pytorch_dump_folder_path + "/" + WEIGHTS_NAME
lowerCamelCase__: Optional[Any] =pytorch_dump_folder_path + "/" + CONFIG_NAME
print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(model.state_dict() , __a )
print(F"""Save configuration file to {pytorch_config_dump_path}""" )
with open(__a , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--gpt2_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
__A = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
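# Hypothetical invocation of the conversion script above (the script filename
# and all paths are placeholders):
#   python convert_gpt2_checkpoint.py \
#       --gpt2_checkpoint_path /path/to/tf_checkpoint \
#       --pytorch_dump_folder_path /path/to/output_dir \
#       --gpt2_config_file /path/to/config.json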
| 10 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
lowerCAmelCase__ = object()
# For specifying empty leaf dict `{}`
lowerCAmelCase__ = object()
def _UpperCAmelCase (UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] ):
_A : str = tuple((re.compile(x + "$" ) for x in qs) )
for i in range(len(UpperCamelCase__ ) - len(UpperCamelCase__ ) + 1 ):
_A : Tuple = [x.match(UpperCamelCase__ ) for x, y in zip(UpperCamelCase__ , ks[i:] )]
if matches and all(UpperCamelCase__ ):
return True
return False
def _UpperCAmelCase (UpperCamelCase__ : str ):
def replace(UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int] ):
for rule, replacement in rules:
if _match(UpperCamelCase__ , UpperCamelCase__ ):
return replacement
return val
return replace
def _UpperCAmelCase ():
return [
# embeddings
(("transformer", "wpe", "embedding"), P("mp" , UpperCamelCase__ )),
(("transformer", "wte", "embedding"), P("mp" , UpperCamelCase__ )),
        # attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(UpperCamelCase__ , "mp" )),
(("attention", "out_proj", "kernel"), P("mp" , UpperCamelCase__ )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(UpperCamelCase__ , "mp" )),
(("mlp", "c_fc", "bias"), P("mp" )),
(("mlp", "c_proj", "kernel"), P("mp" , UpperCamelCase__ )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def _UpperCAmelCase (UpperCamelCase__ : List[str] ):
_A : int = _get_partition_rules()
_A : Optional[int] = _replacement_rules(UpperCamelCase__ )
_A : Optional[int] = {k: _unmatched for k in flatten_dict(UpperCamelCase__ )}
_A : List[str] = {k: replace(UpperCamelCase__ , UpperCamelCase__ ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(UpperCamelCase__ ) )
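# A small self-contained check of the regex rule matching implemented above:
# a rule tuple should match any parameter path that contains those keys in
# sequence. The helper below restates the logic inline for illustration.
import re
def match_sketch(qs, ks):
    qts = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qts) + 1):
        window = [x.match(y) for x, y in zip(qts, ks[i:])]
        if window and all(window):
            return True
    return False
assert match_sketch(("mlp", "c_fc", "kernel"), ("transformer", "h", "0", "mlp", "c_fc", "kernel"))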
| 11 | 0 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def lowerCamelCase__ ( A__ : Dict , A__ : Optional[int]=False ):
'''simple docstring'''
__lowerCamelCase = OmegaConf.load(A__ )
if display:
print(yaml.dump(OmegaConf.to_container(A__ ) ) )
return config
def lowerCamelCase__ ( A__ : Optional[int] , A__ : Union[str, Any]=None , A__ : Any=None ):
'''simple docstring'''
if conf_path is None:
__lowerCamelCase = """./model_checkpoints/vqgan_only.yaml"""
__lowerCamelCase = load_config(A__ , display=A__ )
__lowerCamelCase = VQModel(**config.model.params )
if ckpt_path is None:
__lowerCamelCase = """./model_checkpoints/vqgan_only.pt"""
__lowerCamelCase = torch.load(A__ , map_location=A__ )
if ".ckpt" in ckpt_path:
__lowerCamelCase = sd["""state_dict"""]
model.load_state_dict(A__ , strict=A__ )
model.to(A__ )
del sd
return model
def lowerCamelCase__ ( A__ : Optional[Any] , A__ : List[Any] ):
'''simple docstring'''
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase = model.encode(A__ )
print(f'VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}' )
__lowerCamelCase = model.decode(A__ )
return xrec
def lowerCamelCase__ ( A__ : Tuple , A__ : List[Any]=False ):
'''simple docstring'''
__lowerCamelCase, __lowerCamelCase = string.rsplit(""".""" , 1 )
if reload:
__lowerCamelCase = importlib.import_module(A__ )
importlib.reload(A__ )
return getattr(importlib.import_module(A__ , package=A__ ) , cls )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
if "target" not in config:
raise KeyError("""Expected key `target` to instantiate.""" )
return get_obj_from_str(config["""target"""] )(**config.get("""params""" , {} ) )
def lowerCamelCase__ ( A__ : Optional[Any] , A__ : Optional[Any] , A__ : Dict=True , A__ : int=True ):
'''simple docstring'''
__lowerCamelCase = instantiate_from_config(A__ )
if sd is not None:
model.load_state_dict(A__ )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def lowerCamelCase__ ( A__ : List[Any] , A__ : str , A__ : Dict , A__ : List[Any] ):
'''simple docstring'''
if ckpt:
__lowerCamelCase = torch.load(A__ , map_location="""cpu""" )
__lowerCamelCase = pl_sd["""global_step"""]
print(f'loaded model from global step {global_step}.' )
else:
__lowerCamelCase = {"""state_dict""": None}
__lowerCamelCase = None
__lowerCamelCase = load_model_from_config(config.model , pl_sd["""state_dict"""] , gpu=A__ , eval_mode=A__ )["""model"""]
return model, global_step
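# Hypothetical end-to-end use of the helpers above. The names load_vqgan and
# reconstruct_with_vqgan are guesses at the pre-obfuscation function names,
# and the checkpoint paths are placeholders; requires the taming-transformers
# package to be installed.
import torch
sketch_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
sketch_model = load_vqgan(sketch_device, conf_path="./model_checkpoints/vqgan_only.yaml", ckpt_path="./model_checkpoints/vqgan_only.pt")
sketch_x = torch.randn(1, 3, 256, 256, device=sketch_device)  # dummy image batch
with torch.no_grad():
    sketch_rec = reconstruct_with_vqgan(sketch_x, sketch_model)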
| 12 |
def _UpperCAmelCase (UpperCamelCase__ : str , UpperCamelCase__ : bool = False ):
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
_A : Optional[Any] = f"Expected string as input, found {type(UpperCamelCase__ )}"
raise ValueError(UpperCamelCase__ )
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
_A : Union[str, Any] = f"Expected boolean as use_pascal parameter, found {type(UpperCamelCase__ )}"
raise ValueError(UpperCamelCase__ )
_A : int = input_str.split("_" )
_A : str = 0 if use_pascal else 1
_A : str = words[start_index:]
_A : Optional[Any] = [word[0].upper() + word[1:] for word in words_to_capitalize]
_A : Any = "" if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
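# A worked trace of the algorithm above for the input "some_snake_case":
# split on "_", then camelCase keeps the first word as-is while PascalCase
# capitalizes every word.
words = "some_snake_case".split("_")
camel = words[0] + "".join(w[0].upper() + w[1:] for w in words[1:])
pascal = "".join(w[0].upper() + w[1:] for w in words)
assert camel == "someSnakeCase" and pascal == "SomeSnakeCase"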
| 11 | 0 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
lowerCAmelCase : List[str] = logging.get_logger(__name__)
lowerCAmelCase : Dict = {
"""Salesforce/codegen-350M-nl""": """https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json""",
"""Salesforce/codegen-350M-multi""": """https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json""",
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json""",
"""Salesforce/codegen-2B-nl""": """https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json""",
"""Salesforce/codegen-2B-multi""": """https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json""",
"""Salesforce/codegen-2B-mono""": """https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json""",
"""Salesforce/codegen-6B-nl""": """https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json""",
"""Salesforce/codegen-6B-multi""": """https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json""",
"""Salesforce/codegen-6B-mono""": """https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json""",
"""Salesforce/codegen-16B-nl""": """https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json""",
"""Salesforce/codegen-16B-multi""": """https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json""",
"""Salesforce/codegen-16B-mono""": """https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json""",
}
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : Tuple = '''codegen'''
_UpperCAmelCase : List[Any] = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : List[Any] , lowerCAmelCase__ : int=5_0400 , lowerCAmelCase__ : Dict=2048 , lowerCAmelCase__ : Optional[int]=2048 , lowerCAmelCase__ : Union[str, Any]=4096 , lowerCAmelCase__ : Optional[int]=28 , lowerCAmelCase__ : str=16 , lowerCAmelCase__ : Union[str, Any]=64 , lowerCAmelCase__ : List[Any]=None , lowerCAmelCase__ : Tuple="gelu_new" , lowerCAmelCase__ : List[Any]=0.0 , lowerCAmelCase__ : Any=0.0 , lowerCAmelCase__ : Any=0.0 , lowerCAmelCase__ : Any=1E-5 , lowerCAmelCase__ : Optional[int]=0.02 , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : Any=5_0256 , lowerCAmelCase__ : int=5_0256 , lowerCAmelCase__ : Union[str, Any]=False , **lowerCAmelCase__ : str , ):
SCREAMING_SNAKE_CASE_: Optional[int] = vocab_size
SCREAMING_SNAKE_CASE_: List[str] = n_ctx
SCREAMING_SNAKE_CASE_: List[Any] = n_positions
SCREAMING_SNAKE_CASE_: List[str] = n_embd
SCREAMING_SNAKE_CASE_: Optional[int] = n_layer
SCREAMING_SNAKE_CASE_: Optional[Any] = n_head
SCREAMING_SNAKE_CASE_: Union[str, Any] = n_inner
SCREAMING_SNAKE_CASE_: List[str] = rotary_dim
SCREAMING_SNAKE_CASE_: Dict = activation_function
SCREAMING_SNAKE_CASE_: Dict = resid_pdrop
SCREAMING_SNAKE_CASE_: List[str] = embd_pdrop
SCREAMING_SNAKE_CASE_: List[Any] = attn_pdrop
SCREAMING_SNAKE_CASE_: int = layer_norm_epsilon
SCREAMING_SNAKE_CASE_: str = initializer_range
SCREAMING_SNAKE_CASE_: List[Any] = use_cache
SCREAMING_SNAKE_CASE_: Any = bos_token_id
SCREAMING_SNAKE_CASE_: Optional[Any] = eos_token_id
super().__init__(
bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , tie_word_embeddings=lowerCAmelCase__ , **lowerCAmelCase__)
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : List[Any] , lowerCAmelCase__ : PretrainedConfig , lowerCAmelCase__ : str = "default" , lowerCAmelCase__ : List[PatchingSpec] = None , lowerCAmelCase__ : bool = False , ):
super().__init__(lowerCAmelCase__ , task=lowerCAmelCase__ , patching_specs=lowerCAmelCase__ , use_past=lowerCAmelCase__)
if not getattr(self._config , "pad_token_id" , lowerCAmelCase__):
# TODO: how to do that better?
SCREAMING_SNAKE_CASE_: Optional[int] = 0
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
SCREAMING_SNAKE_CASE_: Tuple = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase__ , direction="inputs")
SCREAMING_SNAKE_CASE_: Tuple = {0: "batch", 1: "past_sequence + sequence"}
else:
SCREAMING_SNAKE_CASE_: int = {0: "batch", 1: "sequence"}
return common_inputs
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
return self._config.n_layer
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
return self._config.n_head
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase__ : PreTrainedTokenizer , lowerCAmelCase__ : int = -1 , lowerCAmelCase__ : int = -1 , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : Optional[TensorType] = None , ):
SCREAMING_SNAKE_CASE_: str = super(lowerCAmelCase__ , self).generate_dummy_inputs(
lowerCAmelCase__ , batch_size=lowerCAmelCase__ , seq_length=lowerCAmelCase__ , is_pair=lowerCAmelCase__ , framework=lowerCAmelCase__)
# We need to order the input in the way they appears in the forward()
SCREAMING_SNAKE_CASE_: str = OrderedDict({"input_ids": common_inputs["input_ids"]})
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
else:
import torch
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE_: Union[str, Any] = seqlen + 2
SCREAMING_SNAKE_CASE_: Optional[Any] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
SCREAMING_SNAKE_CASE_: Tuple = [
(torch.zeros(lowerCAmelCase__), torch.zeros(lowerCAmelCase__)) for _ in range(self.num_layers)
]
SCREAMING_SNAKE_CASE_: Dict = common_inputs["attention_mask"]
if self.use_past:
SCREAMING_SNAKE_CASE_: Union[str, Any] = ordered_inputs["attention_mask"].dtype
SCREAMING_SNAKE_CASE_: List[str] = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(lowerCAmelCase__ , lowerCAmelCase__ , dtype=lowerCAmelCase__)] , dim=1)
return ordered_inputs
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple):
return 13
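# A minimal sketch of the config defined above, using the released
# transformers class of the same name; the asserted numbers are the defaults
# visible in __init__ above.
from transformers import CodeGenConfig
sketch_cfg = CodeGenConfig()
assert sketch_cfg.n_embd == 4096 and sketch_cfg.n_head == 16
assert sketch_cfg.hidden_size == sketch_cfg.n_embd  # resolved via attribute_map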
| 13 |
from __future__ import annotations
def _UpperCAmelCase (UpperCamelCase__ : list[int] , UpperCamelCase__ : list[int] , UpperCamelCase__ : int ):
_A : Dict = list(range(len(UpperCamelCase__ ) ) )
_A : Any = [v / w for v, w in zip(UpperCamelCase__ , UpperCamelCase__ )]
    index.sort(key=lambda UpperCamelCase__ : ratio[UpperCamelCase__ ] , reverse=True )  # descending value/weight ratio
_A : float = 0
_A : list[float] = [0] * len(UpperCamelCase__ )
for i in index:
if weight[i] <= capacity:
_A : Union[str, Any] = 1
max_value += value[i]
capacity -= weight[i]
else:
_A : Optional[Any] = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
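# A worked check of the greedy rule above on the classic instance
# value=[60, 100, 120], weight=[10, 20, 30], capacity=50: take whole items in
# decreasing value/weight ratio, then a fraction of the next. Optimum is 240.
sketch_value, sketch_weight, sketch_cap = [60, 100, 120], [10, 20, 30], 50
order = sorted(range(3), key=lambda i: sketch_value[i] / sketch_weight[i], reverse=True)
total = 0.0
for i in order:
    take = min(sketch_weight[i], sketch_cap)
    total += sketch_value[i] * take / sketch_weight[i]
    sketch_cap -= take
assert total == 240.0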
| 11 | 0 |
from __future__ import annotations
from typing import Any
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> None:
"""simple docstring"""
create_state_space_tree(lowercase_ , [] , 0 )
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> None:
"""simple docstring"""
if index == len(lowercase_ ):
print(lowercase_ )
return
create_state_space_tree(lowercase_ , lowercase_ , index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(lowercase_ , lowercase_ , index + 1 )
current_subsequence.pop()
if __name__ == "__main__":
_lowerCamelCase : list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["""A""", """B""", """C"""])
generate_all_subsequences(seq)
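# For reference: the routine above prints all 2**n subsequences of a length-n
# sequence, starting with the empty list [] and ending with the full sequence
# (e.g. [3, 1, 2, 4]), so the two runs print 16 and 8 lines respectively.
assert 2 ** 4 == 16 and 2 ** 3 == 8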
| 14 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( a):
'''simple docstring'''
def __init__( self , *__lowerCamelCase , **__lowerCamelCase) -> None:
warnings.warn(
"The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use BeitImageProcessor instead." , __lowerCamelCase , )
super().__init__(*__lowerCamelCase , **__lowerCamelCase)
| 11 | 0 |
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def UpperCAmelCase ( a_ , a_ , a_=1E-12 ) -> List[str]:
"""simple docstring"""
__A = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(a_ , axis=1 ) , a_min=a_ ) ).T
__A = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(a_ , axis=1 ) , a_min=a_ ) ).T
return jnp.matmul(a_ , norm_emb_a.T )
class UpperCAmelCase ( nn.Module ):
'''simple docstring'''
snake_case_ = 42
snake_case_ = jnp.floataa
def UpperCamelCase_ ( self : List[str] ):
__A = FlaxCLIPVisionModule(self.config.vision_config )
__A = nn.Dense(self.config.projection_dim ,use_bias=A ,dtype=self.dtype )
__A = self.param("concept_embeds" ,jax.nn.initializers.ones ,(17, self.config.projection_dim) )
__A = self.param(
"special_care_embeds" ,jax.nn.initializers.ones ,(3, self.config.projection_dim) )
__A = self.param("concept_embeds_weights" ,jax.nn.initializers.ones ,(17,) )
__A = self.param("special_care_embeds_weights" ,jax.nn.initializers.ones ,(3,) )
def __call__( self : Tuple ,A : Any ):
__A = self.vision_model(A )[1]
__A = self.visual_projection(A )
__A = jax_cosine_distance(A ,self.special_care_embeds )
__A = jax_cosine_distance(A ,self.concept_embeds )
        # increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign image inputs
__A = 0.0
__A = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
__A = jnp.round(A ,3 )
__A = jnp.any(special_scores > 0 ,axis=1 ,keepdims=A )
# Use a lower threshold if an image has any special care concept
__A = is_special_care * 0.01
__A = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
__A = jnp.round(A ,3 )
__A = jnp.any(concept_scores > 0 ,axis=1 )
return has_nsfw_concepts
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case_ = CLIPConfig
snake_case_ = "clip_input"
snake_case_ = FlaxStableDiffusionSafetyCheckerModule
def __init__( self : int ,A : CLIPConfig ,A : Optional[Tuple] = None ,A : int = 0 ,A : jnp.dtype = jnp.floataa ,A : bool = True ,**A : Tuple ,):
if input_shape is None:
__A = (1, 2_24, 2_24, 3)
__A = self.module_class(config=A ,dtype=A ,**A )
super().__init__(A ,A ,input_shape=A ,seed=A ,dtype=A ,_do_init=_do_init )
def UpperCamelCase_ ( self : int ,A : jax.random.KeyArray ,A : Tuple ,A : FrozenDict = None ):
# init input tensor
__A = jax.random.normal(A ,A )
__A , __A = jax.random.split(A )
__A = {"params": params_rng, "dropout": dropout_rng}
__A = self.module.init(A ,A )["params"]
return random_params
def __call__( self : Tuple ,A : Dict ,A : dict = None ,):
__A = jnp.transpose(A ,(0, 2, 3, 1) )
return self.module.apply(
{"params": params or self.params} ,jnp.array(A ,dtype=jnp.floataa ) ,rngs={} ,)
| 15 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=3_2 , __lowerCamelCase=2 , __lowerCamelCase=3 , __lowerCamelCase=1_6 , __lowerCamelCase=[1, 2, 1] , __lowerCamelCase=[2, 2, 4] , __lowerCamelCase=2 , __lowerCamelCase=2.0 , __lowerCamelCase=True , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.1 , __lowerCamelCase="gelu" , __lowerCamelCase=False , __lowerCamelCase=True , __lowerCamelCase=0.0_2 , __lowerCamelCase=1e-5 , __lowerCamelCase=True , __lowerCamelCase=None , __lowerCamelCase=True , __lowerCamelCase=1_0 , __lowerCamelCase=8 , __lowerCamelCase=["stage1", "stage2", "stage3"] , __lowerCamelCase=[1, 2, 3] , ) -> Optional[Any]:
_A : int = parent
_A : Optional[Any] = batch_size
_A : str = image_size
_A : Tuple = patch_size
_A : Tuple = num_channels
_A : Optional[int] = embed_dim
_A : Dict = depths
_A : Any = num_heads
_A : Any = window_size
_A : int = mlp_ratio
_A : Any = qkv_bias
_A : Union[str, Any] = hidden_dropout_prob
_A : Optional[Any] = attention_probs_dropout_prob
_A : Dict = drop_path_rate
_A : List[Any] = hidden_act
_A : Any = use_absolute_embeddings
_A : Optional[int] = patch_norm
_A : Tuple = layer_norm_eps
_A : List[str] = initializer_range
_A : Optional[int] = is_training
_A : Optional[Any] = scope
_A : Optional[int] = use_labels
_A : Dict = type_sequence_label_size
_A : str = encoder_stride
_A : Optional[int] = out_features
_A : Optional[int] = out_indices
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_A : Optional[Any] = None
if self.use_labels:
_A : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_A : Optional[int] = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self) -> Union[str, Any]:
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> List[Any]:
_A : Dict = MaskFormerSwinModel(config=__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
_A : int = model(__lowerCamelCase)
_A : Any = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
_A : List[str] = int(config.embed_dim * 2 ** (len(config.depths) - 1))
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim))
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Dict:
_A : Optional[Any] = MaskFormerSwinBackbone(config=__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
_A : Dict = model(__lowerCamelCase)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [1_3, 1_6, 1_6, 1_6])
# verify channels
self.parent.assertEqual(len(model.channels) , len(config.out_features))
self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4])
# verify ValueError
with self.parent.assertRaises(__lowerCamelCase):
_A : Union[str, Any] = ["stem"]
_A : Union[str, Any] = MaskFormerSwinBackbone(config=__lowerCamelCase)
def _lowerCamelCase ( self) -> Dict:
_A : Any = self.prepare_config_and_inputs()
_A , _A , _A : List[Any] = config_and_inputs
_A : Optional[int] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( a , a , unittest.TestCase):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def _lowerCamelCase ( self) -> str:
_A : Union[str, Any] = MaskFormerSwinModelTester(self)
_A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , embed_dim=3_7)
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
" `nn.DataParallel`"
))
def _lowerCamelCase ( self) -> Union[str, Any]:
pass
def _lowerCamelCase ( self) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self) -> str:
return
def _lowerCamelCase ( self) -> List[Any]:
_A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase)
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__lowerCamelCase)
@unittest.skip("Swin does not use inputs_embeds")
def _lowerCamelCase ( self) -> str:
pass
@unittest.skip("Swin does not support feedforward chunking")
def _lowerCamelCase ( self) -> List[Any]:
pass
def _lowerCamelCase ( self) -> Optional[int]:
_A , _A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Union[str, Any] = model_class(__lowerCamelCase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
_A : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear))
def _lowerCamelCase ( self) -> Any:
_A , _A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : int = model_class(__lowerCamelCase)
_A : Optional[int] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : int = [*signature.parameters.keys()]
_A : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase)
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
def _lowerCamelCase ( self) -> Tuple:
pass
@unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
def _lowerCamelCase ( self) -> str:
pass
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Optional[int]:
_A : Any = model_class(__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
with torch.no_grad():
_A : str = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase))
_A : Tuple = outputs.hidden_states
_A : Any = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths) + 1)
self.assertEqual(len(__lowerCamelCase) , __lowerCamelCase)
# Swin has a different seq_length
_A : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
_A : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
def _lowerCamelCase ( self) -> Dict:
_A , _A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_A : Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_A : List[Any] = True
self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A : Optional[int] = True
self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
def _lowerCamelCase ( self) -> Tuple:
_A , _A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_A : Optional[int] = 3
_A : Dict = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
_A : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
_A : int = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_A : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_A : List[Any] = True
self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , (padded_height, padded_width))
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A : Union[str, Any] = True
self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , (padded_height, padded_width))
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
def _lowerCamelCase ( self) -> List[str]:
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
def _lowerCamelCase ( self) -> List[str]:
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
def _lowerCamelCase ( self) -> str:
pass
def _lowerCamelCase ( self) -> Optional[Any]:
_A , _A : Any = self.model_tester.prepare_config_and_inputs_for_common()
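        # outputs can legitimately contain NaN (e.g. in padded positions), so the
        # helper below zeroes them out before the tuple/dict tensors are compared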
def set_nan_tensor_to_zero(__lowerCamelCase):
_A : Optional[int] = 0
return t
def check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase={}):
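            # run the same model twice -- once returning a ModelOutput and once a
            # plain tuple -- and check that both return paths produce identical tensors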
with torch.no_grad():
_A : Any = model(**__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase)
_A : int = model(**__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase).to_tuple()
def recursive_check(__lowerCamelCase , __lowerCamelCase):
if isinstance(__lowerCamelCase , (List, Tuple)):
for tuple_iterable_value, dict_iterable_value in zip(__lowerCamelCase , __lowerCamelCase):
recursive_check(__lowerCamelCase , __lowerCamelCase)
elif isinstance(__lowerCamelCase , __lowerCamelCase):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values()):
recursive_check(__lowerCamelCase , __lowerCamelCase)
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(__lowerCamelCase) , set_nan_tensor_to_zero(__lowerCamelCase) , atol=1e-5) , msg=(
"Tuple and dict output are not equal. Difference:"
F" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
F" {torch.isnan(__lowerCamelCase).any()} and `inf`: {torch.isinf(__lowerCamelCase)}. Dict has"
F" `nan`: {torch.isnan(__lowerCamelCase).any()} and `inf`: {torch.isinf(__lowerCamelCase)}."
) , )
recursive_check(__lowerCamelCase , __lowerCamelCase)
for model_class in self.all_model_classes:
_A : List[Any] = model_class(__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
_A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)
_A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
_A : Any = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase)
_A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase)
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
_A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)
_A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True})
_A : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase)
_A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase)
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True})
@require_torch
class lowerCAmelCase__ ( unittest.TestCase , a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (MaskFormerSwinBackbone,) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE = MaskFormerSwinConfig
def _lowerCamelCase ( self) -> Optional[Any]:
_A : Tuple = MaskFormerSwinModelTester(self)
def _lowerCamelCase ( self) -> Optional[Any]:
_A , _A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_A : Union[str, Any] = inputs_dict["pixel_values"].shape[0]
for backbone_class in self.all_model_classes:
_A : Optional[Any] = backbone_class(__lowerCamelCase)
backbone.to(__lowerCamelCase)
backbone.eval()
_A : List[Any] = backbone(**__lowerCamelCase)
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , __lowerCamelCase)
self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels):
            self.assertEqual(feature_map.shape[:2] , (batch_size, n_channels))
self.assertIsNone(outputs.hidden_states)
self.assertIsNone(outputs.attentions)
# Test output_hidden_states=True
_A : List[str] = backbone(**__lowerCamelCase , output_hidden_states=__lowerCamelCase)
self.assertIsNotNone(outputs.hidden_states)
        self.assertEqual(len(outputs.hidden_states) , len(backbone.stage_names))
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
_A , _A , _A : List[str] = hidden_state.shape
                self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels))
# Test output_attentions=True
if self.has_attentions:
_A : int = backbone(**__lowerCamelCase , output_attentions=__lowerCamelCase)
self.assertIsNotNone(outputs.attentions)
| 11 | 0 |
"""simple docstring"""
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class __A ( A_ ,A_ ,A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = StableDiffusionControlNetImgaImgPipeline
lowerCAmelCase : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
lowerCAmelCase : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCAmelCase : Tuple = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} )
lowerCAmelCase : int = IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCAmelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') ,up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') ,cross_attention_dim=32 ,)
torch.manual_seed(0 )
lowercase__ : Any = ControlNetModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,in_channels=4 ,down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') ,cross_attention_dim=32 ,conditioning_embedding_out_channels=(16, 32) ,)
torch.manual_seed(0 )
lowercase__ : List[str] = DDIMScheduler(
beta_start=0.0_0085 ,beta_end=0.012 ,beta_schedule='''scaled_linear''' ,clip_sample=_snake_case ,set_alpha_to_one=_snake_case ,)
torch.manual_seed(0 )
lowercase__ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=4 ,)
torch.manual_seed(0 )
lowercase__ : Optional[Any] = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,)
lowercase__ : List[Any] = CLIPTextModel(_snake_case )
lowercase__ : Union[str, Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase__ : Dict = {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def UpperCAmelCase ( self : Any ,_snake_case : List[Any] ,_snake_case : Any=0 ) -> Any:
"""simple docstring"""
if str(_snake_case ).startswith('''mps''' ):
lowercase__ : Optional[Any] = torch.manual_seed(_snake_case )
else:
lowercase__ : str = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
lowercase__ : List[Any] = 2
lowercase__ : Optional[int] = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) ,generator=_snake_case ,device=torch.device(_snake_case ) ,)
lowercase__ : str = floats_tensor(control_image.shape ,rng=random.Random(_snake_case ) ).to(_snake_case )
lowercase__ : Optional[Any] = image.cpu().permute(0 ,2 ,3 ,1 )[0]
lowercase__ : Optional[Any] = Image.fromarray(np.uinta(_snake_case ) ).convert('''RGB''' ).resize((64, 64) )
lowercase__ : Union[str, Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
def UpperCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() ,reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,)
def UpperCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def UpperCAmelCase ( self : Any ) -> str:
"""simple docstring"""
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
class __A ( A_ ,A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Dict = StableDiffusionControlNetImgaImgPipeline
lowerCAmelCase : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
lowerCAmelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCAmelCase : Dict = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def UpperCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ : List[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') ,up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') ,cross_attention_dim=32 ,)
torch.manual_seed(0 )
def init_weights(_snake_case : Optional[int] ):
if isinstance(_snake_case ,torch.nn.Convad ):
                torch.nn.init.normal_(m.weight )
m.bias.data.fill_(1.0 )
lowercase__ : Any = ControlNetModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,in_channels=4 ,down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') ,cross_attention_dim=32 ,conditioning_embedding_out_channels=(16, 32) ,)
controlneta.controlnet_down_blocks.apply(_snake_case )
torch.manual_seed(0 )
lowercase__ : Any = ControlNetModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,in_channels=4 ,down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') ,cross_attention_dim=32 ,conditioning_embedding_out_channels=(16, 32) ,)
controlneta.controlnet_down_blocks.apply(_snake_case )
torch.manual_seed(0 )
lowercase__ : Dict = DDIMScheduler(
beta_start=0.0_0085 ,beta_end=0.012 ,beta_schedule='''scaled_linear''' ,clip_sample=_snake_case ,set_alpha_to_one=_snake_case ,)
torch.manual_seed(0 )
lowercase__ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=4 ,)
torch.manual_seed(0 )
lowercase__ : List[Any] = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,)
lowercase__ : int = CLIPTextModel(_snake_case )
lowercase__ : Union[str, Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase__ : int = MultiControlNetModel([controlneta, controlneta] )
lowercase__ : Optional[Any] = {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Dict ,_snake_case : Union[str, Any]=0 ) -> List[Any]:
"""simple docstring"""
if str(_snake_case ).startswith('''mps''' ):
lowercase__ : int = torch.manual_seed(_snake_case )
else:
lowercase__ : Dict = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
lowercase__ : int = 2
lowercase__ : Optional[Any] = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) ,generator=_snake_case ,device=torch.device(_snake_case ) ,),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) ,generator=_snake_case ,device=torch.device(_snake_case ) ,),
]
lowercase__ : Dict = floats_tensor(control_image[0].shape ,rng=random.Random(_snake_case ) ).to(_snake_case )
lowercase__ : Dict = image.cpu().permute(0 ,2 ,3 ,1 )[0]
lowercase__ : Optional[int] = Image.fromarray(np.uinta(_snake_case ) ).convert('''RGB''' ).resize((64, 64) )
lowercase__ : Any = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
def UpperCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
lowercase__ : Dict = self.get_dummy_components()
lowercase__ : Dict = self.pipeline_class(**_snake_case )
pipe.to(_snake_case )
lowercase__ : Optional[Any] = 10.0
lowercase__ : Tuple = 4
lowercase__ : Dict = self.get_dummy_inputs(_snake_case )
lowercase__ : Optional[Any] = steps
lowercase__ : Any = scale
lowercase__ : Optional[Any] = pipe(**_snake_case )[0]
lowercase__ : List[str] = self.get_dummy_inputs(_snake_case )
lowercase__ : Optional[int] = steps
lowercase__ : int = scale
lowercase__ : List[str] = pipe(**_snake_case ,control_guidance_start=0.1 ,control_guidance_end=0.2 )[0]
lowercase__ : int = self.get_dummy_inputs(_snake_case )
lowercase__ : Optional[int] = steps
lowercase__ : Dict = scale
lowercase__ : Dict = pipe(**_snake_case ,control_guidance_start=[0.1, 0.3] ,control_guidance_end=[0.2, 0.7] )[0]
lowercase__ : Dict = self.get_dummy_inputs(_snake_case )
lowercase__ : List[Any] = steps
lowercase__ : Optional[int] = scale
lowercase__ : List[Any] = pipe(**_snake_case ,control_guidance_start=0.4 ,control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
def UpperCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() ,reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,)
def UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
def UpperCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
lowercase__ : Union[str, Any] = self.get_dummy_components()
lowercase__ : Optional[Any] = self.pipeline_class(**_snake_case )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(_snake_case )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Any ) -> int:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : int = ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''' )
lowercase__ : Any = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' ,safety_checker=_snake_case ,controlnet=_snake_case )
pipe.enable_model_cpu_offload()
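        # model CPU offload keeps each sub-model on the GPU only while it runs,
        # trading a little speed for a much smaller peak memory footprint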
pipe.set_progress_bar_config(disable=_snake_case )
lowercase__ : Optional[Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
lowercase__ : List[str] = '''evil space-punk bird'''
lowercase__ : Optional[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ).resize((512, 512) )
lowercase__ : Tuple = load_image(
'''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''' ).resize((512, 512) )
lowercase__ : List[Any] = pipe(
_snake_case ,_snake_case ,control_image=_snake_case ,generator=_snake_case ,output_type='''np''' ,num_inference_steps=50 ,strength=0.6 ,)
lowercase__ : List[Any] = output.images[0]
assert image.shape == (512, 512, 3)
lowercase__ : Dict = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''' )
assert np.abs(expected_image - image ).max() < 9e-2
| 16 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
lowerCAmelCase__ = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
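# sentencepiece and torch are optional backends: if one is missing, the
# corresponding names are simply left out of the lazy import structure below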
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['SpeechT5Tokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 11 | 0 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : Any, UpperCAmelCase__ : str, UpperCAmelCase__ : Union[str, Any]=1_3, UpperCAmelCase__ : str=7, UpperCAmelCase__ : int=True, UpperCAmelCase__ : Optional[int]=True, UpperCAmelCase__ : Any=False, UpperCAmelCase__ : str=True, UpperCAmelCase__ : Tuple=9_9, UpperCAmelCase__ : Union[str, Any]=3_2, UpperCAmelCase__ : List[str]=5, UpperCAmelCase__ : int=4, UpperCAmelCase__ : str=3_7, UpperCAmelCase__ : Dict="gelu", UpperCAmelCase__ : List[str]=0.1, UpperCAmelCase__ : Tuple=0.1, UpperCAmelCase__ : Dict=5_1_2, UpperCAmelCase__ : List[str]=1_6, UpperCAmelCase__ : Optional[Any]=2, UpperCAmelCase__ : List[Any]=0.02, UpperCAmelCase__ : str=3, UpperCAmelCase__ : Dict=4, UpperCAmelCase__ : Union[str, Any]=None, ):
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
def _lowercase ( self : Optional[Any] ):
__lowercase = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size], self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
__lowercase = ids_tensor([self.batch_size], self.num_choices )
__lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase ( self : List[str] ):
return OpenLlamaConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=UpperCAmelCase__, initializer_range=self.initializer_range, use_stable_embedding=UpperCAmelCase__, )
def _lowercase ( self : Optional[Any], UpperCAmelCase__ : int, UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : Any, UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : int ):
__lowercase = OpenLlamaModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__lowercase = model(UpperCAmelCase__, attention_mask=UpperCAmelCase__ )
__lowercase = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : Dict, UpperCAmelCase__ : Dict, UpperCAmelCase__ : str, UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Any, UpperCAmelCase__ : Tuple, UpperCAmelCase__ : Optional[Any], ):
__lowercase = True
__lowercase = OpenLlamaModel(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__lowercase = model(
UpperCAmelCase__, attention_mask=UpperCAmelCase__, encoder_hidden_states=UpperCAmelCase__, encoder_attention_mask=UpperCAmelCase__, )
__lowercase = model(
UpperCAmelCase__, attention_mask=UpperCAmelCase__, encoder_hidden_states=UpperCAmelCase__, )
__lowercase = model(UpperCAmelCase__, attention_mask=UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self : List[Any], UpperCAmelCase__ : Dict, UpperCAmelCase__ : int, UpperCAmelCase__ : Any, UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Dict, UpperCAmelCase__ : int, UpperCAmelCase__ : Any, ):
__lowercase = OpenLlamaForCausalLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__lowercase = model(UpperCAmelCase__, attention_mask=UpperCAmelCase__, labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self : int, UpperCAmelCase__ : str, UpperCAmelCase__ : List[str], UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : List[str], UpperCAmelCase__ : int, UpperCAmelCase__ : int, UpperCAmelCase__ : List[str], UpperCAmelCase__ : Dict, UpperCAmelCase__ : Tuple, ):
__lowercase = True
__lowercase = True
__lowercase = OpenLlamaForCausalLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
# first forward pass
__lowercase = model(
UpperCAmelCase__, attention_mask=UpperCAmelCase__, encoder_hidden_states=UpperCAmelCase__, encoder_attention_mask=UpperCAmelCase__, use_cache=UpperCAmelCase__, )
__lowercase = outputs.past_key_values
        # create hypothetical multiple next tokens and extend next_input_ids with them
__lowercase = ids_tensor((self.batch_size, 3), config.vocab_size )
__lowercase = ids_tensor((self.batch_size, 3), vocab_size=2 )
        # append the new tokens to input_ids and extend the attention mask
__lowercase = torch.cat([input_ids, next_tokens], dim=-1 )
__lowercase = torch.cat([input_mask, next_mask], dim=-1 )
__lowercase = model(
UpperCAmelCase__, attention_mask=UpperCAmelCase__, encoder_hidden_states=UpperCAmelCase__, encoder_attention_mask=UpperCAmelCase__, output_hidden_states=UpperCAmelCase__, )["hidden_states"][0]
__lowercase = model(
UpperCAmelCase__, attention_mask=UpperCAmelCase__, encoder_hidden_states=UpperCAmelCase__, encoder_attention_mask=UpperCAmelCase__, past_key_values=UpperCAmelCase__, output_hidden_states=UpperCAmelCase__, )["hidden_states"][0]
# select random slice
__lowercase = ids_tensor((1,), output_from_past.shape[-1] ).item()
__lowercase = output_from_no_past[:, -3:, random_slice_idx].detach()
__lowercase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase__, UpperCAmelCase__, atol=1E-3 ) )
def _lowercase ( self : Any ):
__lowercase = self.prepare_config_and_inputs()
        __lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase = config_and_inputs
__lowercase = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( lowercase ,lowercase ,lowercase ,unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : int = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__UpperCAmelCase : Union[str, Any] = (OpenLlamaForCausalLM,) if is_torch_available() else ()
__UpperCAmelCase : Any = (
{
"feature-extraction": OpenLlamaModel,
"text-classification": OpenLlamaForSequenceClassification,
"text-generation": OpenLlamaForCausalLM,
"zero-shot": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : Dict = False
def _lowercase ( self : Optional[int] ):
__lowercase = OpenLlamaModelTester(self )
__lowercase = ConfigTester(self, config_class=UpperCAmelCase__, hidden_size=3_7 )
def _lowercase ( self : int ):
self.config_tester.run_common_tests()
def _lowercase ( self : Optional[int] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def _lowercase ( self : Union[str, Any] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowercase = type
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def _lowercase ( self : Union[str, Any] ):
__lowercase ,__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = 3
__lowercase = input_dict["input_ids"]
__lowercase = input_ids.ne(1 ).to(UpperCAmelCase__ )
__lowercase = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size )
__lowercase = OpenLlamaForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__lowercase = model(UpperCAmelCase__, attention_mask=UpperCAmelCase__, labels=UpperCAmelCase__ )
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase ( self : List[Any] ):
__lowercase ,__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = 3
__lowercase = "single_label_classification"
__lowercase = input_dict["input_ids"]
__lowercase = input_ids.ne(1 ).to(UpperCAmelCase__ )
__lowercase = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size )
__lowercase = OpenLlamaForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__lowercase = model(UpperCAmelCase__, attention_mask=UpperCAmelCase__, labels=UpperCAmelCase__ )
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase ( self : Union[str, Any] ):
__lowercase ,__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = 3
__lowercase = "multi_label_classification"
__lowercase = input_dict["input_ids"]
__lowercase = input_ids.ne(1 ).to(UpperCAmelCase__ )
__lowercase = ids_tensor(
[self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size ).to(torch.float )
__lowercase = OpenLlamaForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__lowercase = model(UpperCAmelCase__, attention_mask=UpperCAmelCase__, labels=UpperCAmelCase__ )
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("Open-Llama buffers include complex numbers, which breaks this test" )
def _lowercase ( self : Dict ):
pass
@parameterized.expand([("linear",), ("dynamic",)] )
def _lowercase ( self : List[Any], UpperCAmelCase__ : List[Any] ):
__lowercase ,__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = ids_tensor([1, 1_0], config.vocab_size )
__lowercase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )], config.vocab_size )
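        # the long input deliberately exceeds max_position_embeddings so that
        # dynamic NTK scaling has to kick in for the second forward pass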
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
__lowercase = OpenLlamaModel(UpperCAmelCase__ )
original_model.to(UpperCAmelCase__ )
original_model.eval()
__lowercase = original_model(UpperCAmelCase__ ).last_hidden_state
__lowercase = original_model(UpperCAmelCase__ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
__lowercase = {"type": scaling_type, "factor": 10.0}
__lowercase = OpenLlamaModel(UpperCAmelCase__ )
scaled_model.to(UpperCAmelCase__ )
scaled_model.eval()
__lowercase = scaled_model(UpperCAmelCase__ ).last_hidden_state
__lowercase = scaled_model(UpperCAmelCase__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(UpperCAmelCase__, UpperCAmelCase__, atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(UpperCAmelCase__, UpperCAmelCase__, atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(UpperCAmelCase__, UpperCAmelCase__, atol=1E-5 ) )
| 17 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def _UpperCAmelCase (UpperCamelCase__ : Union[str, Any] ):
_A , _A : Any = image.size
_A , _A : str = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
_A : List[str] = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] )
_A : Any = np.array(UpperCamelCase__ ).astype(np.floataa ) / 2_55.0
_A : Optional[Any] = image[None].transpose(0 , 3 , 1 , 2 )
_A : Union[str, Any] = torch.from_numpy(UpperCamelCase__ )
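    # rescale pixel values from [0, 1] to the [-1, 1] range the VQ-VAE expects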
return 2.0 * image - 1.0
class lowerCAmelCase__ ( a):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> Optional[int]:
super().__init__()
self.register_modules(vqvae=__lowerCamelCase , unet=__lowerCamelCase , scheduler=__lowerCamelCase)
@torch.no_grad()
def __call__( self , __lowerCamelCase = None , __lowerCamelCase = 1 , __lowerCamelCase = 1_0_0 , __lowerCamelCase = 0.0 , __lowerCamelCase = None , __lowerCamelCase = "pil" , __lowerCamelCase = True , ) -> Union[Tuple, ImagePipelineOutput]:
if isinstance(__lowerCamelCase , PIL.Image.Image):
_A : Tuple = 1
elif isinstance(__lowerCamelCase , torch.Tensor):
_A : Union[str, Any] = image.shape[0]
else:
raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(__lowerCamelCase)}")
if isinstance(__lowerCamelCase , PIL.Image.Image):
_A : Union[str, Any] = preprocess(__lowerCamelCase)
_A , _A : Union[str, Any] = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
_A : Optional[Any] = (batch_size, self.unet.config.in_channels // 2, height, width)
_A : str = next(self.unet.parameters()).dtype
_A : Union[str, Any] = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=self.device , dtype=__lowerCamelCase)
_A : List[Any] = image.to(device=self.device , dtype=__lowerCamelCase)
# set timesteps and move to the correct device
self.scheduler.set_timesteps(__lowerCamelCase , device=self.device)
_A : Any = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
_A : List[str] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_A : str = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
_A : Optional[int] = {}
if accepts_eta:
_A : List[Any] = eta
for t in self.progress_bar(__lowerCamelCase):
# concat latents and low resolution image in the channel dimension.
_A : List[Any] = torch.cat([latents, image] , dim=1)
_A : str = self.scheduler.scale_model_input(__lowerCamelCase , __lowerCamelCase)
# predict the noise residual
_A : Any = self.unet(__lowerCamelCase , __lowerCamelCase).sample
# compute the previous noisy sample x_t -> x_t-1
_A : Optional[int] = self.scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase).prev_sample
# decode the image latents with the VQVAE
_A : Union[str, Any] = self.vqvae.decode(__lowerCamelCase).sample
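        # clamp the decoded sample to [-1, 1] and map it back to [0, 1] before
        # converting to numpy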
_A : Dict = torch.clamp(__lowerCamelCase , -1.0 , 1.0)
_A : Tuple = image / 2 + 0.5
_A : int = image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
_A : Optional[int] = self.numpy_to_pil(__lowerCamelCase)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowerCamelCase)
| 11 | 0 |
__lowerCamelCase : dict[tuple[int, int, int], int] = {}
def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int ):
"""simple docstring"""
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
SCREAMING_SNAKE_CASE_ : Optional[int] = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
SCREAMING_SNAKE_CASE_ : Tuple = _calculate(days - 1 , lowerCAmelCase , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
SCREAMING_SNAKE_CASE_ : Optional[Any] = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
SCREAMING_SNAKE_CASE_ : str = _calculate(days - 1 , lowerCAmelCase , 0 )
SCREAMING_SNAKE_CASE_ : str = state_late + state_absent + state_ontime
SCREAMING_SNAKE_CASE_ : Dict = prizestrings
return prizestrings
def _snake_case ( lowerCAmelCase : int = 3_0 ):
"""simple docstring"""
return _calculate(lowerCAmelCase , absent=0 , late=0 )
if __name__ == "__main__":
print(solution())
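    # added sanity note (not part of the original): small terms can be checked
    # by hand -- a 4-day term admits exactly 43 valid prize strings
    # print(solution(4))  # -> 43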
| 18 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( a , a , unittest.TestCase):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VQModel
__SCREAMING_SNAKE_CASE = "sample"
@property
def _lowerCamelCase ( self , __lowerCamelCase=(3_2, 3_2)) -> Optional[Any]:
_A : Optional[int] = 4
_A : Tuple = 3
_A : List[Any] = floats_tensor((batch_size, num_channels) + sizes).to(__lowerCamelCase)
return {"sample": image}
@property
def _lowerCamelCase ( self) -> int:
return (3, 3_2, 3_2)
@property
def _lowerCamelCase ( self) -> List[Any]:
return (3, 3_2, 3_2)
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : List[Any] = {
"block_out_channels": [3_2, 6_4],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 3,
}
_A : int = self.dummy_input
return init_dict, inputs_dict
def _lowerCamelCase ( self) -> Union[str, Any]:
pass
def _lowerCamelCase ( self) -> Any:
pass
def _lowerCamelCase ( self) -> Any:
_A , _A : List[Any] = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=__lowerCamelCase)
self.assertIsNotNone(__lowerCamelCase)
self.assertEqual(len(loading_info["missing_keys"]) , 0)
model.to(__lowerCamelCase)
_A : str = model(**self.dummy_input)
assert image is not None, "Make sure output is not None"
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : Optional[Any] = VQModel.from_pretrained("fusing/vqgan-dummy")
model.to(__lowerCamelCase).eval()
torch.manual_seed(0)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0)
_A : Tuple = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size)
_A : Optional[int] = image.to(__lowerCamelCase)
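        # the manual seeding above makes this random input -- and the output
        # slice checked below -- reproducible across runs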
with torch.no_grad():
_A : List[str] = model(__lowerCamelCase).sample
_A : int = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
_A : Optional[Any] = torch.tensor([-0.0_1_5_3, -0.4_0_4_4, -0.1_8_8_0, -0.5_1_6_1, -0.2_4_1_8, -0.4_0_7_2, -0.1_6_1_2, -0.0_6_3_3, -0.0_1_4_3])
# fmt: on
self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3))
| 11 | 0 |
from __future__ import annotations
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = [True] * limit
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = True
for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
lowerCamelCase_ = i * 2
while index < limit:
lowerCamelCase_ = False
lowerCamelCase_ = index + i
lowerCamelCase_ = [2]
for i in range(3 , lowerCamelCase__ , 2 ):
if is_prime[i]:
primes.append(lowerCamelCase__ )
return primes
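# illustrative note (added): for limit=20 the sieve above is intended to
# return [2, 3, 5, 7, 11, 13, 17, 19]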
def lowerCamelCase_ ( lowerCamelCase__ = 1_0_0_0_0_0_0 ):
lowerCamelCase_ = prime_sieve(lowerCamelCase__ )
lowerCamelCase_ = 0
lowerCamelCase_ = 0
for i in range(len(lowerCamelCase__ ) ):
for j in range(i + length , len(lowerCamelCase__ ) ):
lowerCamelCase_ = sum(primes[i:j] )
if sol >= ceiling:
break
if sol in primes:
lowerCamelCase_ = j - i
lowerCamelCase_ = sol
return largest
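# Project Euler 50: with the default ceiling of 1_000_000 this should print
# 997651, the sum of 543 consecutive primes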
if __name__ == "__main__":
print(F"""{solution() = }""")
| 19 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase__ = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
},
}
lowerCAmelCase__ = {
'facebook/mbart-large-en-ro': 10_24,
'facebook/mbart-large-cc25': 10_24,
}
# fmt: off
lowerCAmelCase__ = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
__SCREAMING_SNAKE_CASE = MBartTokenizer
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="<s>" , __lowerCamelCase="</s>" , __lowerCamelCase="</s>" , __lowerCamelCase="<s>" , __lowerCamelCase="<unk>" , __lowerCamelCase="<pad>" , __lowerCamelCase="<mask>" , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , **__lowerCamelCase , ) -> Optional[int]:
# Mask token behave like a normal word, i.e. include the space before it
_A : List[str] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase) else mask_token
super().__init__(
vocab_file=__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , **__lowerCamelCase , )
_A : Union[str, Any] = vocab_file
_A : int = False if not self.vocab_file else True
_A : Optional[int] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens])
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
_A : Union[str, Any] = {
lang_code: self.convert_tokens_to_ids(__lowerCamelCase) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_A : Optional[int] = src_lang if src_lang is not None else "en_XX"
_A : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang)
_A : int = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
@property
def _lowerCamelCase ( self) -> str:
return self._src_lang
@src_lang.setter
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Dict = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]:
_A : List[str] = [self.sep_token_id]
_A : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) -> Dict:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
_A : str = src_lang
_A : Any = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)
_A : Tuple = self.convert_tokens_to_ids(__lowerCamelCase)
_A : Dict = tgt_lang_id
return inputs
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = "en_XX" , __lowerCamelCase = None , __lowerCamelCase = "ro_RO" , **__lowerCamelCase , ) -> BatchEncoding:
_A : Any = src_lang
_A : int = tgt_lang
return super().prepare_seqaseq_batch(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self) -> List[str]:
return self.set_src_lang_special_tokens(self.src_lang)
def _lowerCamelCase ( self) -> List[Any]:
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
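        # mBART source format is `tokens </s> <lang_code>`: no prefix tokens,
        # the language code goes into the suffix right after EOS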
_A : int = self.convert_tokens_to_ids(__lowerCamelCase)
_A : int = []
_A : List[str] = [self.eos_token_id, self.cur_lang_code]
_A : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens)
_A : str = self.convert_ids_to_tokens(self.suffix_tokens)
_A : List[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Optional[int] = self.convert_tokens_to_ids(__lowerCamelCase)
_A : List[Any] = []
_A : str = [self.eos_token_id, self.cur_lang_code]
_A : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens)
_A : int = self.convert_ids_to_tokens(self.suffix_tokens)
_A : str = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer.")
if not os.path.isdir(__lowerCamelCase):
logger.error(F"Vocabulary path ({save_directory}) should be a directory.")
return
_A : int = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(__lowerCamelCase):
copyfile(self.vocab_file , __lowerCamelCase)
return (out_vocab_file,)
| 11 | 0 |
from ...configuration_utils import PretrainedConfig
lowercase : Dict = {
"""google/tapas-base-finetuned-sqa""": (
"""https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wtq""": (
"""https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wikisql-supervised""": (
"""https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-tabfact""": (
"""https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"""
),
}
class __snake_case ( lowerCAmelCase ):
_a : Tuple= "tapas"
def __init__( self ,snake_case=30522 ,snake_case=768 ,snake_case=12 ,snake_case=12 ,snake_case=3072 ,snake_case="gelu" ,snake_case=0.1 ,snake_case=0.1 ,snake_case=1024 ,snake_case=[3, 256, 256, 2, 256, 256, 10] ,snake_case=0.02 ,snake_case=1e-12 ,snake_case=0 ,snake_case=10.0 ,snake_case=0 ,snake_case=1.0 ,snake_case=None ,snake_case=1.0 ,snake_case=False ,snake_case=None ,snake_case=1.0 ,snake_case=1.0 ,snake_case=False ,snake_case=False ,snake_case="ratio" ,snake_case=None ,snake_case=None ,snake_case=64 ,snake_case=32 ,snake_case=False ,snake_case=True ,snake_case=False ,snake_case=False ,snake_case=True ,snake_case=False ,snake_case=None ,snake_case=None ,**snake_case ,):
'''simple docstring'''
super().__init__(pad_token_id=snake_case ,**snake_case )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
lowercase : List[str] = vocab_size
lowercase : Optional[int] = hidden_size
lowercase : Optional[Any] = num_hidden_layers
lowercase : Dict = num_attention_heads
lowercase : Dict = hidden_act
lowercase : Optional[int] = intermediate_size
lowercase : Union[str, Any] = hidden_dropout_prob
lowercase : Optional[int] = attention_probs_dropout_prob
lowercase : Tuple = max_position_embeddings
lowercase : str = type_vocab_sizes
lowercase : Union[str, Any] = initializer_range
lowercase : Union[str, Any] = layer_norm_eps
# Fine-tuning task hyperparameters
lowercase : Dict = positive_label_weight
lowercase : Dict = num_aggregation_labels
lowercase : Optional[int] = aggregation_loss_weight
lowercase : Any = use_answer_as_supervision
lowercase : int = answer_loss_importance
lowercase : Tuple = use_normalized_answer_loss
lowercase : List[str] = huber_loss_delta
lowercase : Optional[Any] = temperature
lowercase : Dict = aggregation_temperature
lowercase : Union[str, Any] = use_gumbel_for_cells
lowercase : Dict = use_gumbel_for_aggregation
lowercase : Optional[Any] = average_approximation_function
lowercase : Optional[Any] = cell_selection_preference
lowercase : Optional[Any] = answer_loss_cutoff
lowercase : List[Any] = max_num_rows
lowercase : int = max_num_columns
lowercase : List[str] = average_logits_per_cell
lowercase : str = select_one_column
lowercase : List[Any] = allow_empty_column_selection
lowercase : str = init_cell_selection_weights_to_zero
lowercase : str = reset_position_index_per_cell
lowercase : Any = disable_per_token_loss
# Aggregation hyperparameters
lowercase : List[Any] = aggregation_labels
lowercase : str = no_aggregation_label_index
if isinstance(self.aggregation_labels ,snake_case ):
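            # configs round-trip through JSON, which stringifies dict keys;
            # convert them back to ints here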
lowercase : List[str] = {int(snake_case ): v for k, v in aggregation_labels.items()}
| 20 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'vocab_file': 'vocab.json',
'tokenizer_config_file': 'tokenizer_config.json',
'merges_file': 'merges.txt',
}
lowerCAmelCase__ = {
'vocab_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
),
},
'tokenizer_config_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
),
},
'merges_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
),
},
}
lowerCAmelCase__ = '</w>'
lowerCAmelCase__ = '@@ '
def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] ):
_A : Optional[int] = set()
_A : Optional[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_A : List[Any] = char
return pairs
# Speech2Text2 has no max input length
lowerCAmelCase__ = {'facebook/s2t-wav2vec2-large-en-de': 10_24}
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
def __init__( self , __lowerCamelCase , __lowerCamelCase="<s>" , __lowerCamelCase="<pad>" , __lowerCamelCase="</s>" , __lowerCamelCase="<unk>" , __lowerCamelCase=False , __lowerCamelCase=None , **__lowerCamelCase , ) -> Optional[Any]:
super().__init__(
unk_token=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , pad_token=__lowerCamelCase , do_lower_case=__lowerCamelCase , **__lowerCamelCase , )
_A : Dict = do_lower_case
with open(__lowerCamelCase , encoding="utf-8") as vocab_handle:
_A : Optional[int] = json.load(__lowerCamelCase)
_A : Optional[Any] = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(F"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
_A : Optional[Any] = None
_A : Tuple = None
else:
with open(__lowerCamelCase , encoding="utf-8") as merges_handle:
_A : Optional[int] = merges_handle.read().split("\n")[:-1]
_A : Union[str, Any] = [tuple(merge.split()[:2]) for merge in merges]
_A : Optional[int] = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase))))
_A : List[Any] = {}
@property
def _lowerCamelCase ( self) -> int:
return len(self.decoder)
def _lowerCamelCase ( self) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder)
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
_A : Tuple = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
_A : int = get_pairs(__lowerCamelCase)
if not pairs:
return token
while True:
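            # greedily apply the merge with the best (lowest) rank until no
            # mergeable pair is left in the word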
_A : Any = min(__lowerCamelCase , key=lambda __lowerCamelCase: self.bpe_ranks.get(__lowerCamelCase , float("inf")))
if bigram not in self.bpe_ranks:
break
_A , _A : Optional[int] = bigram
_A : int = []
_A : str = 0
while i < len(__lowerCamelCase):
try:
_A : str = word.index(__lowerCamelCase , __lowerCamelCase)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
_A : str = j
if word[i] == first and i < len(__lowerCamelCase) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
_A : List[str] = tuple(__lowerCamelCase)
_A : List[str] = new_word
if len(__lowerCamelCase) == 1:
break
else:
_A : List[Any] = get_pairs(__lowerCamelCase)
_A : Tuple = " ".join(__lowerCamelCase)
if word == "\n " + BPE_TOKEN_MERGES:
_A : List[str] = "\n" + BPE_TOKEN_MERGES
if word.endswith(__lowerCamelCase):
_A : int = word.replace(__lowerCamelCase , "")
_A : int = word.replace(" " , __lowerCamelCase)
_A : Union[str, Any] = word
return word
def _lowerCamelCase ( self , __lowerCamelCase) -> Optional[Any]:
if self.bpe_ranks is None:
raise ValueError(
"This tokenizer was instantiated without a `merges.txt` file, so"
" that it can only be used for decoding, not for encoding."
"Make sure to provide `merges.txt` file at instantiation to enable "
"encoding.")
if self.do_lower_case:
_A : List[Any] = text.lower()
_A : Optional[int] = text.split()
_A : List[str] = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(__lowerCamelCase).split(" ")))
return split_tokens
def _lowerCamelCase ( self , __lowerCamelCase) -> int:
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token))
def _lowerCamelCase ( self , __lowerCamelCase) -> str:
_A : List[str] = self.decoder.get(__lowerCamelCase , self.unk_token)
return result
def _lowerCamelCase ( self , __lowerCamelCase) -> str:
_A : str = " ".join(__lowerCamelCase)
# make sure @@ tokens are concatenated
_A : int = "".join(string.split(__lowerCamelCase))
return string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return

        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)

        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return (vocab_file, merges_file)
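# --- Added illustration (hedged; not part of the original module): the merge loop in
# `bpe` repeatedly picks the adjacent symbol pair with the lowest merge rank. The toy
# ranks below are invented for demonstration; it assumes the module-level `get_pairs`
# helper that `bpe` calls above.
if __name__ == "__main__":
    toy_ranks = {("l", "o"): 0, ("lo", "w</w>"): 1}
    demo_word = ("l", "o", "w</w>")
    demo_bigram = min(get_pairs(demo_word), key=lambda pair: toy_ranks.get(pair, float("inf")))
    print(demo_bigram)  # ('l', 'o') -- merged first because it has the lowest rank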
| 11 | 0 |
import heapq


def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """Greedy APX-Algorithm for Minimum Vertex Cover.

    >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    >>> greedy_min_vertex_cover(graph)
    {0, 1, 2, 4}
    """
    queue: list[list] = []

    # for each node and its adjacency list, add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so -1*len(v) is used to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v has no adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
| 21 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    r"""
    Configuration class to store the configuration of a ViT MAE model. Instantiating a
    configuration with the defaults yields a configuration similar to the
    [facebook/vit-mae-base](https://huggingface.co/facebook/vit-mae-base) architecture.
    """

    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
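# --- Added usage sketch (hedged; not part of the original file): instantiates the
# default configuration and overrides one field, the way model code consumes it.
if __name__ == "__main__":
    config = ViTMAEConfig(mask_ratio=0.9)
    print(config.model_type, config.hidden_size, config.mask_ratio)  # vit_mae 768 0.9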
| 11 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)

BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/resolve/main/config.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/config.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/config.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json''',
}
class BloomConfig(PretrainedConfig):
    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
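# --- Added usage sketch (hedged; not part of the original file): wires the two
# configs together the way the ONNX exporter would, just to inspect the dynamic axes.
if __name__ == "__main__":
    onnx_config = BloomOnnxConfig(BloomConfig(), use_past=False)
    print(onnx_config.inputs)  # OrderedDict([('input_ids', ...), ('attention_mask', ...)])
    print(onnx_config.default_onnx_opset)  # 13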
| 22 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_speech_encoder_decoder'] = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_speech_encoder_decoder'] = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
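# --- Added aside (hedged; not part of the original file): the _LazyModule pattern
# above defers the heavy torch/flax imports until an attribute is first accessed.
# PEP 562 gives the same effect with a module-level __getattr__; `_module_for` is a
# made-up name used only for this sketch, so the code is left commented out.
#
# import importlib
#
# _module_for = {"SpeechEncoderDecoderModel": ".modeling_speech_encoder_decoder"}
#
# def __getattr__(name):
#     return getattr(importlib.import_module(_module_for[name], __package__), name)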
| 11 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"junnyu/roformer_chinese_small": 1536,
"junnyu/roformer_chinese_base": 1536,
"junnyu/roformer_chinese_char_small": 512,
"junnyu/roformer_chinese_char_base": 512,
"junnyu/roformer_small_discriminator": 128,
"junnyu/roformer_small_generator": 128,
}
PRETRAINED_INIT_CONFIGURATION = {
"junnyu/roformer_chinese_small": {"do_lower_case": True},
"junnyu/roformer_chinese_base": {"do_lower_case": True},
"junnyu/roformer_chinese_char_small": {"do_lower_case": True},
"junnyu/roformer_chinese_char_base": {"do_lower_case": True},
"junnyu/roformer_small_discriminator": {"do_lower_case": True},
"junnyu/roformer_small_generator": {"do_lower_case": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" RoFormer tokenizer (backed by HuggingFace's *tokenizers* library),
    which uses a custom Jieba pre-tokenizer for Chinese text.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        # Make sure we correctly set the custom PreTokenizer
        vocab = self.backend_tokenizer.get_vocab()
        self.backend_tokenizer.pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        # the custom Jieba pre-tokenizer is not picklable; swap in a standard one
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(
        self,
        save_directory,
        legacy_format=None,
        filename_prefix=None,
        push_to_hub=False,
        **kwargs,
    ):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
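# --- Added usage sketch (hedged; not part of the original file): round-trips the
# pickle support above -- __getstate__ swaps in a picklable BertPreTokenizer and
# __setstate__ re-attaches the custom Jieba pre-tokenizer. Kept commented because it
# needs a checkpoint download plus the rjieba package.
#
# import pickle
# tok = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
# tok2 = pickle.loads(pickle.dumps(tok))  # custom pre-tokenizer re-attached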
| 23 |
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
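#
# --- Added illustration (not part of the original header): the run matrix is just a
# cartesian product of the --variations dimensions, mirroring the itertools.product
# call in main() below:
#
#   >>> import itertools
#   >>> dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]
#   >>> [" ".join(v).strip() for v in itertools.product(*dims)]
#   ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16', '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']
#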
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float('nan')
class Tee:
    """
    A helper class to tee print's output into a file.
    Usage: sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(results) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-cmd", default=None, type=str, required=True, help="Base cmd")
    parser.add_argument(
        "--variations",
        default=None,
        type=str,
        nargs="+",
        required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation",
        default=None,
        type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key",
        default=None,
        type=str,
        required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys",
        default="",
        type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples",
    )
    parser.add_argument(
        "--repeat-times",
        default=1,
        type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir",
        default="output_benchmark",
        type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose",
        default=False,
        action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
main()
| 11 | 0 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for _ in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """
    Returns a triplet in the array with sum equal to target, else (0, 0, 0).
    >>> triplet_sum1([13, 29, 7, 23, 5], 35)
    (5, 7, 23)
    """
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """
    Returns a triplet in the array with sum equal to target, else (0, 0, 0).
    >>> triplet_sum2([13, 29, 7, 23, 5], 35)
    (5, 7, 23)
    """
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f'The time for naive implementation is {times[0]}.')
    print(f'The time for optimized implementation is {times[1]}.')
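# --- Added aside (illustration only): the two-pointer scan used by triplet_sum2
# after sorting -- for a fixed arr[i], `left` moves up when the running sum is too
# small and `right` moves down when it is too big.
assert triplet_sum2([1, 2, 4, 8, 9], 15) == (2, 4, 9)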
| 24 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that dynamically pads the inputs for multiple choice.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)
    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).

    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).

    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer

    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
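# --- Added usage sketch (hedged; not part of the original example): a typical
# invocation, with made-up hyperparameters, matching the argument classes above.
#
#   python run_swag.py \
#     --model_name_or_path bert-base-uncased \
#     --do_train --do_eval \
#     --output_dir /tmp/swag_out \
#     --per_device_train_batch_size 16 \
#     --max_seq_length 128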
| 11 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inpainting(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inpainting_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
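# --- Added aside (hedged; not part of the original tests): the ONNX pipelines take a
# NumPy RandomState rather than a torch.Generator, so a fixed seed keeps the
# reference slices above reproducible across runs:
#
#   generator = np.random.RandomState(0)
#   generator.standard_normal(4)  # same four floats on every run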
| 25 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False")) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "pytorch",
"script": "run_ddp.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "tensorflow",
"script": "run_tf_dist.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
},
])
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")
    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 11 | 0 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
_snake_case = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_snake_case = concatenate_datasets
_snake_case = DownloadConfig
_snake_case = DownloadManager
_snake_case = DownloadMode
_snake_case = DownloadConfig
_snake_case = DownloadMode
_snake_case = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 26 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ["image_processor", "tokenizer"]
__SCREAMING_SNAKE_CASE = "OwlViTImageProcessor"
__SCREAMING_SNAKE_CASE = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , **__lowerCamelCase) -> Union[str, Any]:
_A : int = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __lowerCamelCase , )
_A : List[Any] = kwargs.pop("feature_extractor")
_A : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`.")
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`.")
super().__init__(__lowerCamelCase , __lowerCamelCase)
def __call__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="max_length" , __lowerCamelCase="np" , **__lowerCamelCase) -> Any:
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none.")
if text is not None:
if isinstance(__lowerCamelCase , __lowerCamelCase) or (isinstance(__lowerCamelCase , __lowerCamelCase) and not isinstance(text[0] , __lowerCamelCase)):
_A : Union[str, Any] = [self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)]
elif isinstance(__lowerCamelCase , __lowerCamelCase) and isinstance(text[0] , __lowerCamelCase):
_A : Optional[Any] = []
# Maximum number of queries across batch
_A : str = max([len(__lowerCamelCase) for t in text])
# Pad all batch samples to max number of text queries
for t in text:
if len(__lowerCamelCase) != max_num_queries:
_A : Optional[int] = t + [" "] * (max_num_queries - len(__lowerCamelCase))
_A : List[Any] = self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)
encodings.append(__lowerCamelCase)
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings")
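            # stack the padded per-sample encodings into batch tensors for the requested framework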
if return_tensors == "np":
_A : Tuple = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0)
_A : Optional[Any] = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0)
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
_A : Optional[int] = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0)
_A : Optional[int] = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0)
elif return_tensors == "pt" and is_torch_available():
import torch
_A : Optional[Any] = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0)
_A : Union[str, Any] = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0)
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
_A : Any = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0)
_A : Tuple = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0)
else:
raise ValueError("Target return tensor type could not be returned")
_A : Optional[Any] = BatchEncoding()
_A : Tuple = input_ids
_A : Dict = attention_mask
if query_images is not None:
_A : Optional[Any] = BatchEncoding()
_A : List[str] = self.image_processor(
__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase).pixel_values
_A : Union[str, Any] = query_pixel_values
if images is not None:
_A : int = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)
if text is not None and images is not None:
_A : Tuple = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
_A : int = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__lowerCamelCase) , tensor_type=__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> str:
return self.image_processor.post_process(*__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> List[str]:
return self.image_processor.post_process_object_detection(*__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> Optional[int]:
return self.image_processor.post_process_image_guided_detection(*__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> int:
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> Optional[int]:
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase)
@property
def _lowerCamelCase ( self) -> int:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __lowerCamelCase , )
return self.image_processor_class
@property
def _lowerCamelCase ( self) -> List[str]:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __lowerCamelCase , )
return self.image_processor
| 11 | 0 |
'''simple docstring'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class __UpperCamelCase ( lowerCAmelCase_ ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = tempfile.mkdtemp()
__a : Dict = 8
# DPR tok
__a : List[Any] = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__a : int = os.path.join(self.tmpdirname , 'dpr_tokenizer' )
os.makedirs(__a , exist_ok=__a )
__a : List[str] = os.path.join(__a , DPR_VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
# BART tok
__a : Tuple = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
__a : List[str] = dict(zip(__a , range(len(__a ) ) ) )
__a : Dict = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
__a : Any = {'unk_token': '<unk>'}
__a : int = os.path.join(self.tmpdirname , 'bart_tokenizer' )
os.makedirs(__a , exist_ok=__a )
__a : str = os.path.join(__a , BART_VOCAB_FILES_NAMES['vocab_file'] )
__a : List[str] = os.path.join(__a , BART_VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__a ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__a ) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer' ) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = Dataset.from_dict(
{
'id': ['0', '1'],
'text': ['foo', 'bar'],
'title': ['Foo', 'Bar'],
'embeddings': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.get_dummy_dataset()
__a : Dict = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('transformers.models.rag.retrieval_rag.load_dataset' ) as mock_load_dataset:
__a : Union[str, Any] = dataset
__a : Dict = RagRetriever(
__a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : Optional[int] = self.get_dummy_dataset()
__a : Tuple = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='custom' , )
if from_disk:
__a : Any = os.path.join(self.tmpdirname , 'dataset' )
__a : Union[str, Any] = os.path.join(self.tmpdirname , 'index.faiss' )
dataset.get_index('embeddings' ).save(os.path.join(self.tmpdirname , 'index.faiss' ) )
dataset.drop_index('embeddings' )
dataset.save_to_disk(os.path.join(self.tmpdirname , 'dataset' ) )
del dataset
__a : int = RagRetriever(
__a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
__a : List[Any] = RagRetriever(
__a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , __a ) , )
return retriever
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = Dataset.from_dict(
{
'id': ['0', '1'],
'text': ['foo', 'bar'],
'title': ['Foo', 'Bar'],
'embeddings': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT )
__a : Optional[Any] = os.path.join(self.tmpdirname , 'hf_bert_base.hnswSQ8_correct_phi_128.c_index' )
dataset.save_faiss_index('embeddings' , index_file_name + '.index.dpr' )
pickle.dump(dataset['id'] , open(index_file_name + '.index_meta.dpr' , 'wb' ) )
__a : Dict = os.path.join(self.tmpdirname , 'psgs_w100.tsv.pkl' )
__a : Union[str, Any] = {sample['id']: [sample['text'], sample['title']] for sample in dataset}
pickle.dump(__a , open(__a , 'wb' ) )
__a : Union[str, Any] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='legacy' , index_path=self.tmpdirname , )
__a : Optional[Any] = RagRetriever(
__a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = 1
__a : Union[str, Any] = self.get_dummy_canonical_hf_index_retriever()
__a : Optional[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__a , __a , __a : Union[str, Any] = retriever.retrieve(__a , n_docs=__a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) , __a )
self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('transformers.models.rag.retrieval_rag.load_dataset' ) as mock_load_dataset:
__a : Union[str, Any] = self.get_dummy_dataset()
retriever.save_pretrained(__a )
__a : List[str] = RagRetriever.from_pretrained(__a )
self.assertIsInstance(__a , __a )
__a : Any = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__a : Optional[Any] = retriever.retrieve(__a , n_docs=1 )
self.assertTrue(out is not None )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = 1
__a : Union[str, Any] = self.get_dummy_custom_hf_index_retriever(from_disk=__a )
__a : Dict = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__a , __a , __a : str = retriever.retrieve(__a , n_docs=__a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) , __a )
self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = self.get_dummy_custom_hf_index_retriever(from_disk=__a )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__a )
__a : Dict = RagRetriever.from_pretrained(__a )
self.assertIsInstance(__a , __a )
__a : Optional[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__a : List[str] = retriever.retrieve(__a , n_docs=1 )
self.assertTrue(out is not None )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = 1
__a : List[str] = self.get_dummy_custom_hf_index_retriever(from_disk=__a )
__a : int = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__a , __a , __a : Any = retriever.retrieve(__a , n_docs=__a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) , __a )
self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = self.get_dummy_custom_hf_index_retriever(from_disk=__a )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__a )
__a : int = RagRetriever.from_pretrained(__a )
self.assertIsInstance(__a , __a )
__a : Any = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__a : Optional[Any] = retriever.retrieve(__a , n_docs=1 )
self.assertTrue(out is not None )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = 1
__a : str = self.get_dummy_legacy_index_retriever()
__a : int = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__a , __a , __a : int = retriever.retrieve(__a , n_docs=__a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['text', 'title'] )
self.assertEqual(len(doc_dicts[0]['text'] ) , __a )
self.assertEqual(doc_dicts[0]['text'][0] , 'bar' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['text'][0] , 'foo' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__a )
__a : Any = RagRetriever.from_pretrained(__a )
self.assertIsInstance(__a , __a )
__a : List[str] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__a : Dict = retriever.retrieve(__a , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def __UpperCAmelCase ( self ):
'''simple docstring'''
import torch
__a : Optional[int] = 1
__a : Any = self.get_dummy_canonical_hf_index_retriever()
__a : Optional[int] = [[5, 7], [10, 11]]
__a : str = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__a : List[str] = retriever(__a , __a , prefix=retriever.config.generator.prefix , n_docs=__a )
__a , __a , __a : Dict = (
out['context_input_ids'],
out['context_attention_mask'],
out['retrieved_doc_embeds'],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__a , __a )
self.assertIsInstance(__a , __a )
self.assertIsInstance(__a , np.ndarray )
__a : int = retriever(
__a , __a , prefix=retriever.config.generator.prefix , n_docs=__a , return_tensors='pt' , )
__a , __a , __a , __a : List[str] = ( # noqa: F841
out['context_input_ids'],
out['context_attention_mask'],
out['retrieved_doc_embeds'],
out['doc_ids'],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__a , torch.Tensor )
self.assertIsInstance(__a , torch.Tensor )
self.assertIsInstance(__a , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = self.get_dpr_ctx_encoder_tokenizer()
__a : Optional[int] = 1
__a : Union[str, Any] = self.get_dummy_custom_hf_index_retriever(from_disk=__a )
retriever.set_ctx_encoder_tokenizer(__a )
__a : int = [[5, 7], [10, 11]]
__a : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__a : Optional[Any] = retriever(__a , __a , prefix=retriever.config.generator.prefix , n_docs=__a )
self.assertEqual(
            len(__a ) , 6 ) # check whether the retriever output consists of 6 attributes, including the tokenized docs
self.assertEqual(
            all(k in out for k in ('tokenized_doc_ids', 'tokenized_doc_attention_mask') ) , __a ) # check for the doc-token-related keys in the output dictionary.
| 27 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"]):
_A : Optional[int] = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(__lowerCamelCase)
def _lowerCamelCase ( self) -> int:
_A : Optional[int] = "sshleifer/tiny-gpt2"
_A : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : List[str] = PyTorchBenchmark(__lowerCamelCase)
_A : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> Dict:
_A : int = "sgugger/tiny-distilbert-classification"
_A : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , only_pretrain_model=__lowerCamelCase , )
_A : Dict = PyTorchBenchmark(__lowerCamelCase)
_A : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> Optional[Any]:
_A : Tuple = "sshleifer/tiny-gpt2"
_A : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , torchscript=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Union[str, Any] = PyTorchBenchmark(__lowerCamelCase)
_A : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
    @unittest.skipIf(torch_device == "cpu" , "Can't do half precision")
def _lowerCamelCase ( self) -> int:
_A : Any = "sshleifer/tiny-gpt2"
_A : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , fpaa=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Any = PyTorchBenchmark(__lowerCamelCase)
_A : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> Any:
_A : Union[str, Any] = "sshleifer/tiny-gpt2"
_A : Any = AutoConfig.from_pretrained(__lowerCamelCase)
# set architectures equal to `None`
_A : Dict = None
_A : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Union[str, Any] = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> int:
_A : List[Any] = "sshleifer/tiny-gpt2"
_A : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Optional[Any] = PyTorchBenchmark(__lowerCamelCase)
_A : int = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
@unittest.skipIf(torch_device == "cpu" , "Can't do half precision")
def _lowerCamelCase ( self) -> Optional[Any]:
_A : Any = "sshleifer/tiny-gpt2"
_A : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=__lowerCamelCase , multi_process=__lowerCamelCase , )
_A : List[Any] = PyTorchBenchmark(__lowerCamelCase)
_A : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def _lowerCamelCase ( self) -> str:
_A : List[str] = "sshleifer/tiny-gpt2"
_A : Union[str, Any] = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Optional[Any] = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> int:
_A : Tuple = "sshleifer/tinier_bart"
_A : Optional[Any] = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Dict = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> str:
_A : List[Any] = "sshleifer/tiny-gpt2"
_A : Optional[Any] = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : List[str] = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def _lowerCamelCase ( self) -> int:
_A : int = "sshleifer/tinier_bart"
_A : str = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Tuple = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def _lowerCamelCase ( self) -> Dict:
_A : List[str] = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
_A : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , save_to_csv=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__lowerCamelCase , "inf_time.csv") , train_memory_csv_file=os.path.join(__lowerCamelCase , "train_mem.csv") , inference_memory_csv_file=os.path.join(__lowerCamelCase , "inf_mem.csv") , train_time_csv_file=os.path.join(__lowerCamelCase , "train_time.csv") , env_info_csv_file=os.path.join(__lowerCamelCase , "env.csv") , multi_process=__lowerCamelCase , )
_A : Tuple = PyTorchBenchmark(__lowerCamelCase)
benchmark.run()
self.assertTrue(Path(os.path.join(__lowerCamelCase , "inf_time.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "train_time.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "inf_mem.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "train_mem.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "env.csv")).exists())
def _lowerCamelCase ( self) -> int:
_A : Dict = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(__lowerCamelCase):
self.assertTrue(hasattr(__lowerCamelCase , "sequential"))
self.assertTrue(hasattr(__lowerCamelCase , "cumulative"))
self.assertTrue(hasattr(__lowerCamelCase , "current"))
self.assertTrue(hasattr(__lowerCamelCase , "total"))
with tempfile.TemporaryDirectory() as tmp_dir:
_A : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__lowerCamelCase , "log.txt") , log_print=__lowerCamelCase , trace_memory_line_by_line=__lowerCamelCase , multi_process=__lowerCamelCase , )
_A : Optional[int] = PyTorchBenchmark(__lowerCamelCase)
_A : Dict = benchmark.run()
_check_summary_is_not_empty(result.inference_summary)
_check_summary_is_not_empty(result.train_summary)
self.assertTrue(Path(os.path.join(__lowerCamelCase , "log.txt")).exists())
| 11 | 0 |
'''simple docstring'''
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_lowerCamelCase : Any = logging.get_logger(__name__)
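# maps fairseq checkpoint key names to the corresponding HF UniSpeechSat module paths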
_lowerCamelCase : int = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
_lowerCamelCase : List[Any] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def __lowerCamelCase ( A__ , A__ , A__ , A__ , A__ ) -> List[Any]:
"""simple docstring"""
for attribute in key.split('.' ):
UpperCamelCase = getattr(A__ , A__ )
if weight_type is not None:
UpperCamelCase = getattr(A__ , A__ ).shape
else:
UpperCamelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
UpperCamelCase = value
elif weight_type == "weight_g":
UpperCamelCase = value
elif weight_type == "weight_v":
UpperCamelCase = value
elif weight_type == "bias":
UpperCamelCase = value
else:
UpperCamelCase = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def __lowerCamelCase ( A__ , A__ ) -> Dict:
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = fairseq_model.state_dict()
UpperCamelCase = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
UpperCamelCase = False
if "conv_layers" in name:
load_conv_layer(
A__ , A__ , A__ , A__ , hf_model.config.feat_extract_norm == 'group' , )
UpperCamelCase = True
else:
for key, mapped_key in MAPPING.items():
UpperCamelCase = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
# special case since naming is very similar
continue
UpperCamelCase = True
if "*" in mapped_key:
UpperCamelCase = name.split(A__ )[0].split('.' )[-2]
UpperCamelCase = mapped_key.replace('*' , A__ )
if "weight_g" in name:
UpperCamelCase = 'weight_g'
elif "weight_v" in name:
UpperCamelCase = 'weight_v'
elif "bias" in name:
UpperCamelCase = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCamelCase = 'weight'
else:
UpperCamelCase = None
set_recursively(A__ , A__ , A__ , A__ , A__ )
continue
if not is_used:
unused_weights.append(A__ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def __lowerCamelCase ( A__ , A__ , A__ , A__ , A__ ) -> Dict:
"""simple docstring"""
UpperCamelCase = full_name.split('conv_layers.' )[-1]
UpperCamelCase = name.split('.' )
UpperCamelCase = int(items[0] )
UpperCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
UpperCamelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
UpperCamelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
UpperCamelCase = value
            logger.info(F"""Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
UpperCamelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(A__ )
@torch.no_grad()
def __lowerCamelCase ( A__ , A__ , A__=None , A__=None , A__=True ) -> int:
"""simple docstring"""
if config_path is not None:
UpperCamelCase = UniSpeechSatConfig.from_pretrained(A__ )
else:
UpperCamelCase = UniSpeechSatConfig()
UpperCamelCase = ''
if is_finetuned:
UpperCamelCase = UniSpeechSatForCTC(A__ )
else:
UpperCamelCase = UniSpeechSatForPreTraining(A__ )
UpperCamelCase , UpperCamelCase , UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
UpperCamelCase = model[0].eval()
recursively_load_weights(A__ , A__ )
hf_wavavec.save_pretrained(A__ )
if __name__ == "__main__":
_lowerCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_lowerCamelCase : Any = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 28 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase__ = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
),
},
}
lowerCAmelCase__ = {
'facebook/nllb-large-en-ro': 10_24,
'facebook/nllb-200-distilled-600M': 10_24,
}
# fmt: off
lowerCAmelCase__ = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
__SCREAMING_SNAKE_CASE = NllbTokenizer
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="<s>" , __lowerCamelCase="</s>" , __lowerCamelCase="</s>" , __lowerCamelCase="<s>" , __lowerCamelCase="<unk>" , __lowerCamelCase="<pad>" , __lowerCamelCase="<mask>" , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=False , **__lowerCamelCase , ) -> Tuple:
        # Mask token behaves like a normal word, i.e. it includes the space before it
_A : Any = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase) else mask_token
_A : Optional[int] = legacy_behaviour
super().__init__(
vocab_file=__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , legacy_behaviour=__lowerCamelCase , **__lowerCamelCase , )
_A : int = vocab_file
_A : Optional[Any] = False if not self.vocab_file else True
_A : Tuple = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens])
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
_A : Union[str, Any] = {
lang_code: self.convert_tokens_to_ids(__lowerCamelCase) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_A : Optional[int] = src_lang if src_lang is not None else "eng_Latn"
_A : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang)
_A : List[str] = tgt_lang
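        # configure the prefix/suffix special tokens for the initial source language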
self.set_src_lang_special_tokens(self._src_lang)
@property
def _lowerCamelCase ( self) -> str:
return self._src_lang
@src_lang.setter
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]:
_A : Tuple = [self.sep_token_id]
_A : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) -> Optional[int]:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
_A : List[Any] = src_lang
_A : Optional[int] = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)
_A : Tuple = self.convert_tokens_to_ids(__lowerCamelCase)
_A : Tuple = tgt_lang_id
return inputs
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = "eng_Latn" , __lowerCamelCase = None , __lowerCamelCase = "fra_Latn" , **__lowerCamelCase , ) -> BatchEncoding:
_A : Tuple = src_lang
_A : int = tgt_lang
return super().prepare_seqaseq_batch(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self) -> str:
return self.set_src_lang_special_tokens(self.src_lang)
def _lowerCamelCase ( self) -> List[str]:
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Dict = self.convert_tokens_to_ids(__lowerCamelCase)
if self.legacy_behaviour:
_A : List[str] = []
_A : Dict = [self.eos_token_id, self.cur_lang_code]
else:
_A : Tuple = [self.cur_lang_code]
_A : Optional[Any] = [self.eos_token_id]
_A : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens)
_A : int = self.convert_ids_to_tokens(self.suffix_tokens)
_A : List[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Optional[Any] = self.convert_tokens_to_ids(__lowerCamelCase)
if self.legacy_behaviour:
_A : Tuple = []
_A : Any = [self.eos_token_id, self.cur_lang_code]
else:
_A : Union[str, Any] = [self.cur_lang_code]
_A : str = [self.eos_token_id]
_A : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens)
_A : Dict = self.convert_ids_to_tokens(self.suffix_tokens)
_A : Union[str, Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer.")
if not os.path.isdir(__lowerCamelCase):
logger.error(F"Vocabulary path ({save_directory}) should be a directory.")
return
_A : Dict = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(__lowerCamelCase):
copyfile(self.vocab_file , __lowerCamelCase)
return (out_vocab_file,)
| 11 | 0 |
import os
import pytest
from attr import dataclass
__UpperCAmelCase = 'us-east-1' # defaults region
@dataclass
class lowerCamelCase :
'''simple docstring'''
_snake_case : str
_snake_case : List[Any] = '''arn:aws:iam::558105141721:role/sagemaker_execution_role'''
_snake_case : Any = {
'''task_name''': '''mnli''',
'''per_device_train_batch_size''': 1_6,
'''per_device_eval_batch_size''': 1_6,
'''do_train''': True,
'''do_eval''': True,
'''do_predict''': True,
'''output_dir''': '''/opt/ml/model''',
'''overwrite_output_dir''': True,
'''max_steps''': 5_0_0,
'''save_steps''': 5_5_0_0,
}
_snake_case : Tuple = {**hyperparameters, '''max_steps''': 1_0_0_0}
@property
def __UpperCAmelCase ( self ) -> str:
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def __UpperCAmelCase ( self ) -> str:
        return f"{self.framework}-transformers-test"
@property
def __UpperCAmelCase ( self ) -> str:
return f"./tests/sagemaker/scripts/{self.framework}"
@property
def __UpperCAmelCase ( self ) -> str:
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Any = SageMakerTestEnvironment(framework=request.cls.framework )
| 29 |
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {}
lowerCAmelCase__ = {}
lowerCAmelCase__ = {}
def _UpperCAmelCase (UpperCamelCase__ : type , UpperCamelCase__ : Optional[str] , UpperCamelCase__ : Optional[List[str]] = None , ):
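    # register a formatter class under a format type name, warning when an existing registration is overwritten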
_A : Union[str, Any] = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})" )
_A : Dict = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})" )
_A : Dict = format_type
def _UpperCAmelCase (UpperCamelCase__ : Exception , UpperCamelCase__ : Optional[str] , UpperCamelCase__ : Optional[List[str]] = None ):
_A : Union[str, Any] = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
_A : Union[str, Any] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
lowerCAmelCase__ = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
_register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
lowerCAmelCase__ = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
_register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
lowerCAmelCase__ = ValueError('JAX needs to be installed to be able to return JAX arrays.')
_register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def _UpperCAmelCase (UpperCamelCase__ : Optional[str] ):
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def _UpperCAmelCase (UpperCamelCase__ : Optional[str] , **UpperCamelCase__ : List[Any] ):
_A : List[str] = get_format_type_from_alias(UpperCamelCase__ )
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**UpperCamelCase__ )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}'" )
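# e.g. requesting the "np" alias resolves to "numpy" and yields a NumpyFormatter when numpy is available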
| 11 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 30 |
def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] ):
# "extended trapezoidal rule"
# int(f) = dx/2 * (f1 + 2f2 + ... + fn)
_A : int = (boundary[1] - boundary[0]) / steps
_A : Any = boundary[0]
_A : List[Any] = boundary[1]
_A : str = make_points(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
_A : str = 0.0
y += (h / 2.0) * f(UpperCamelCase__ )
for i in x_i:
# print(i)
y += h * f(UpperCamelCase__ )
y += (h / 2.0) * f(UpperCamelCase__ )
return y
def _UpperCAmelCase (UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any ):
_A : Optional[int] = a + h
while x < (b - h):
yield x
_A : Dict = x + h
def _UpperCAmelCase (UpperCamelCase__ : Optional[int] ): # enter your function here
_A : Any = (x - 0) * (x - 0)
return y
def _UpperCAmelCase ():
_A : Optional[Any] = 0.0 # Lower bound of integration
_A : Optional[int] = 1.0 # Upper bound of integration
_A : List[Any] = 10.0 # define number of steps or resolution
_A : Any = [a, b] # define boundary of integration
_A : Tuple = method_a(UpperCamelCase__ , UpperCamelCase__ )
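    # with f(x) = x**2 on [0, 1] and 10 steps, the trapezoidal estimate is ~0.335 (exact integral: 1/3)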
print(f"y = {y}" )
if __name__ == "__main__":
main()
| 11 | 0 |
'''simple docstring'''
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
__SCREAMING_SNAKE_CASE : Any = """bert-base-cased"""
__SCREAMING_SNAKE_CASE : Any = """fp16"""
__SCREAMING_SNAKE_CASE : Optional[Any] = """bf16"""
__SCREAMING_SNAKE_CASE : Dict = [FPaa, BFaa]
@require_fsdp
@require_cuda
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
def _A ( self : Union[str, Any] ):
super().setUp()
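        # mocked environment variables that emulate a single-process FSDP launch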
_UpperCAmelCase : Optional[int] = dict(
ACCELERATE_USE_FSDP="true" , MASTER_ADDR="localhost" , MASTER_PORT="10999" , RANK="0" , LOCAL_RANK="0" , WORLD_SIZE="1" , )
def _A ( self : Optional[Any] ):
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(A ):
_UpperCAmelCase : Union[str, Any] = self.dist_env.copy()
_UpperCAmelCase : Any = F"""{i + 1}"""
_UpperCAmelCase : str = strategy
with mockenv_context(**A ):
_UpperCAmelCase : Any = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )
def _A ( self : Any ):
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(A ):
_UpperCAmelCase : Union[str, Any] = self.dist_env.copy()
_UpperCAmelCase : Optional[Any] = prefetch_policy
with mockenv_context(**A ):
_UpperCAmelCase : Union[str, Any] = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
def _A ( self : int ):
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(A ):
_UpperCAmelCase : Optional[int] = self.dist_env.copy()
_UpperCAmelCase : Tuple = state_dict_type
with mockenv_context(**A ):
_UpperCAmelCase : List[str] = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )
def _A ( self : List[str] ):
_UpperCAmelCase : List[str] = AutoModel.from_pretrained(A )
for policy in FSDP_AUTO_WRAP_POLICY:
_UpperCAmelCase : Any = self.dist_env.copy()
_UpperCAmelCase : Optional[int] = policy
if policy == "TRANSFORMER_BASED_WRAP":
_UpperCAmelCase : Optional[Any] = "BertLayer"
elif policy == "SIZE_BASED_WRAP":
_UpperCAmelCase : Any = "2000"
with mockenv_context(**A ):
_UpperCAmelCase : List[Any] = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(A )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
_UpperCAmelCase : Any = self.dist_env.copy()
_UpperCAmelCase : Tuple = "TRANSFORMER_BASED_WRAP"
_UpperCAmelCase : Tuple = "T5Layer"
with mockenv_context(**A ):
_UpperCAmelCase : Dict = FullyShardedDataParallelPlugin()
with self.assertRaises(A ) as cm:
fsdp_plugin.set_auto_wrap_policy(A )
self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception ) )
_UpperCAmelCase : str = self.dist_env.copy()
_UpperCAmelCase : Optional[int] = "SIZE_BASED_WRAP"
_UpperCAmelCase : List[str] = "0"
with mockenv_context(**A ):
_UpperCAmelCase : List[Any] = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(A )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
    def test_mixed_precision( self ):
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler

        for mp_dtype in dtypes:
            env = self.dist_env.copy()
            env["ACCELERATE_MIXED_PRECISION"] = mp_dtype
            with mockenv_context(**env ):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype , reduce_dtype=dtype , buffer_dtype=dtype )
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , mp_policy )
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler , ShardedGradScaler ) )
                elif mp_dtype == BF16:
                    self.assertIsNone(accelerator.scaler )
                AcceleratorState._reset_state(True )

    def test_cpu_offload( self ):
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload

        for flag in [True, False]:
            env = self.dist_env.copy()
            env["FSDP_OFFLOAD_PARAMS"] = str(flag ).lower()
            with mockenv_context(**env ):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=flag ) )
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest (snake_case__ ):
    def setUp( self ):
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 3200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160

        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "external_deps"] )
    def test_performance( self ):
        self.test_file_path = os.path.join(self.test_scripts_folder , "test_performance.py" )
        cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
        for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
                if strategy.lower() in config:
                    cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" )
                    break

            if "fp32" in config:
                cmd_config.append("--mixed_precision=no" )
            else:
                cmd_config.append("--mixed_precision=fp16" )

            if "cpu_offload" in config:
                cmd_config.append("--fsdp_offload_params=True" )

            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in config:
                    cmd_config.append(F"""--fsdp_auto_wrap_policy={policy}""" )
                    break

            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer" )
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append("--fsdp_min_num_params=2000" )
            cmd_config.extend(
                [
                    self.test_file_path,
                    F"""--output_dir={self.tmpdir}""",
                    F"""--performance_lower_bound={self.performance_lower_bound}""",
                ] )
            with patch_environment(omp_num_threads=1 ):
                execute_subprocess_async(cmd_config , env=os.environ.copy() )
    def test_checkpointing( self ):
        self.test_file_path = os.path.join(self.test_scripts_folder , "test_checkpointing.py" )
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
            "--use_fsdp",
            "--mixed_precision=fp16",
            "--fsdp_transformer_layer_cls_to_wrap=BertLayer",
        ]

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
            cmd_config = cmd.copy()
            cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" )
            if strategy != "FULL_SHARD":
                continue
            state_dict_config_index = len(cmd_config )
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
                cmd_config.append(F"""--fsdp_state_dict_type={state_dict_type}""" )
                cmd_config.extend(
                    [
                        self.test_file_path,
                        F"""--output_dir={self.tmpdir}""",
                        "--partial_train_epoch=1",
                    ] )
                with patch_environment(omp_num_threads=1 ):
                    execute_subprocess_async(cmd_config , env=os.environ.copy() )

                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir , "epoch_0" )
                cmd_config.extend(
                    [
                        F"""--resume_from_checkpoint={resume_from_checkpoint}""",
                    ] )
                with patch_environment(omp_num_threads=1 ):
                    execute_subprocess_async(cmd_config , env=os.environ.copy() )
    def test_peak_memory_usage( self ):
        self.test_file_path = os.path.join(self.test_scripts_folder , "test_peak_memory_usage.py" )
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(["--mixed_precision=fp16"] )
            else:
                cmd_config.extend(["--mixed_precision=no"] )

            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(["--use_fsdp"] )
                for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
                    if strategy.lower() in spec:
                        cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" )
                        break

                if "cpu_offload" in spec:
                    cmd_config.append("--fsdp_offload_params=True" )

                for policy in FSDP_AUTO_WRAP_POLICY:
                    if policy.lower() in spec:
                        cmd_config.append(F"""--fsdp_auto_wrap_policy={policy}""" )
                        break

                if policy == "TRANSFORMER_BASED_WRAP":
                    cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer" )
                elif policy == "SIZE_BASED_WRAP":
                    cmd_config.append("--fsdp_min_num_params=2000" )

            cmd_config.extend(
                [
                    self.test_file_path,
                    F"""--output_dir={self.tmpdir}""",
                    F"""--peak_memory_upper_bound={peak_mem_upper_bound}""",
                    F"""--n_train={self.n_train}""",
                    F"""--n_val={self.n_val}""",
                ] )
            with patch_environment(omp_num_threads=1 ):
                execute_subprocess_async(cmd_config , env=os.environ.copy() )
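# --- Illustrative usage (not part of the test suite above; the training
# script name `train.py` is a hypothetical placeholder) ---
# The integration tests above effectively assemble command lines like:
#
#   accelerate launch --num_processes=2 --num_machines=1 --machine_rank=0 \
#       --use_fsdp --fsdp_sharding_strategy=1 \
#       --fsdp_auto_wrap_policy=TRANSFORMER_BASED_WRAP \
#       --fsdp_transformer_layer_cls_to_wrap=BertLayer \
#       --mixed_precision=fp16 train.py --output_dir ./out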
| 31 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest( unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config( self , config_name) -> List[str]:
        config = GenerationConfig(
            do_sample=True , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir , config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir , config_name=config_name)
        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample , True)
        self.assertEqual(loaded_config.temperature , 0.7)
        self.assertEqual(loaded_config.length_penalty , 1.0)
        self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]])
        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k , 50)
        self.assertEqual(loaded_config.max_length , 20)
        self.assertEqual(loaded_config.max_time , None)

    def test_from_model_config( self) -> Optional[int]:
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()
        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model , default_generation_config)
        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id)

    def test_update( self) -> Optional[Any]:
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)
        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs , update_kwargs_copy)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens , 1024)
        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs , {"foo": "bar"})
    def test_initialize_new_kwargs( self) -> Any:
        generation_config = GenerationConfig()
        generation_config.foo = "bar"
        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)
            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo , "bar")
        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config , "foo") # no new kwargs should be initialized if from config

    def test_kwarg_init( self) -> List[str]:
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature , 1.0)
        self.assertEqual(default_config.do_sample , False)
        self.assertEqual(default_config.num_beams , 1)

        config = GenerationConfig(
            do_sample=True , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
        self.assertEqual(config.temperature , 0.7)
        self.assertEqual(config.do_sample , True)
        self.assertEqual(config.num_beams , 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir , temperature=1.0)

        self.assertEqual(loaded_config.temperature , 1.0)
        self.assertEqual(loaded_config.do_sample , True)
        self.assertEqual(loaded_config.num_beams , 1) # default value
@is_staging_test
class ConfigPushToHubTester( unittest.TestCase):
    @classmethod
    def setUpClass( cls) -> Optional[int]:
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass( cls) -> List[Any]:
        try:
            delete_repo(token=cls._token , repo_id="test-generation-config")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub( self) -> Any:
        config = GenerationConfig(
            do_sample=True , temperature=0.7 , length_penalty=1.0 , )
        config.push_to_hub("test-generation-config" , use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(F"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k))

        # Reset repo
        delete_repo(token=self._token , repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir , repo_id="test-generation-config" , push_to_hub=True , use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(F"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k))

    def test_push_to_hub_in_organization( self) -> Union[str, Any]:
        config = GenerationConfig(
            do_sample=True , temperature=0.7 , length_penalty=1.0 , )
        config.push_to_hub("valid_org/test-generation-config-org" , use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k))

        # Reset repo
        delete_repo(token=self._token , repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir , repo_id="valid_org/test-generation-config-org" , push_to_hub=True , use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k))
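# --- Illustrative usage (a minimal sketch of the save/load round trip the
# tests above exercise; the directory path is arbitrary) ---
# from transformers import GenerationConfig
# cfg = GenerationConfig(do_sample=True, temperature=0.7)
# cfg.save_pretrained("/tmp/gen-config")   # writes generation_config.json
# assert GenerationConfig.from_pretrained("/tmp/gen-config").temperature == 0.7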
| 11 | 0 |
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features ) -> Optional[int]:
    """
    Get the writer_batch_size that defines the maximum row group size in the parquet files.
    The default in `datasets` is lowered for binary-heavy features (images, audio, raw bytes)
    so that a single row group stays cheap to read for random access.
    """
    batch_size = np.inf

    def set_batch_size(feature: FeatureType ) -> None:
        nonlocal batch_size
        if isinstance(feature , Image ):
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
        elif isinstance(feature , Audio ):
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
        elif isinstance(feature , Value ) and feature.dtype == "binary":
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )

    _visit(features , set_batch_size )

    return None if batch_size is np.inf else batch_size
class ParquetDatasetReader(AbstractDatasetReader ):
    def __init__( self , path_or_paths : NestedDataStructureLike[PathLike] , split : Optional[NamedSplit] = None , features : Optional[Features] = None , cache_dir : str = None , keep_in_memory : bool = False , streaming : bool = False , num_proc : Optional[int] = None , **kwargs , ) -> None:
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES['parquet'][1]
        self.builder = Parquet(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , hash=hash , **kwargs , )

    def read( self ):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
class ParquetDatasetWriter:
    def __init__( self , dataset : Dataset , path_or_buf : Union[PathLike, BinaryIO] , batch_size : Optional[int] = None , **parquet_writer_kwargs , ) -> None:
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features )
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write( self ) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            with open(self.path_or_buf , 'wb+' ) as buffer:
                written = self._write(file_obj=buffer , batch_size=batch_size , **self.parquet_writer_kwargs )
        else:
            written = self._write(file_obj=self.path_or_buf , batch_size=batch_size , **self.parquet_writer_kwargs )
        return written

    def _write( self , file_obj : BinaryIO , batch_size : int , **parquet_writer_kwargs ) -> int:
        # Writes the pyarrow table as Parquet to a binary file handle.
        # The caller is responsible for opening and closing the handle.
        written = 0
        _ = parquet_writer_kwargs.pop('path_or_buf' , None )
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj , schema=schema , **parquet_writer_kwargs )

        for offset in logging.tqdm(
            range(0 , len(self.dataset ) , batch_size ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating parquet from Arrow format' , ):
            batch = query_table(
                table=self.dataset._data , key=slice(offset , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
            writer.write_table(batch )
            written += batch.nbytes
        writer.close()
        return written
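# --- Illustrative usage (a minimal sketch; file and column names are made up) ---
# import datasets
# ds = datasets.Dataset.from_dict({"text": ["hello", "world"]})
# n_bytes = ParquetDatasetWriter(ds, "example.parquet").write()
# reloaded = ParquetDatasetReader("example.parquet", cache_dir="/tmp/hf-cache").read()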
| 32 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
    def __init__( self , conv1_get , size_p1 , bp_num1 , bp_num2 , bp_num3 , rate_w=0.2 , rate_t=0.2):
        """
        :param conv1_get: [a, c, d] -- size, number, step of convolution kernel
        :param size_p1: pooling size
        :param bp_num1: units number of flatten layer
        :param bp_num2: units number of hidden layer
        :param bp_num3: units number of output layer
        :param rate_w: rate of weight learning
        :param rate_t: rate of threshold learning
        """
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0] , self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3 , self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2 , self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1

    def save_model( self , save_path):
        # save model dict with pickle
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path , "wb") as f:
            pickle.dump(model_dic , f)
        print(F"Model saved: {save_path}")
    @classmethod
    def read_model( cls , model_path):
        # read saved model
        with open(model_path , "rb") as f:
            model_dic = pickle.load(f) # noqa: S301

        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp1 = model_dic.get("num_bp1")
        bp2 = model_dic.get("num_bp2")
        bp3 = model_dic.get("num_bp3")
        rate_w = model_dic.get("rate_weight")
        rate_t = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get , size_p1 , bp1 , bp2 , bp3 , rate_w , rate_t)
        # modify model parameter
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins

    def sig( self , x):
        return 1 / (1 + np.exp(-1 * x))

    def do_round( self , x):
        return round(x , 3)
    def convolute( self , data , convs , w_convs , thre_convs , conv_step):
        # convolution process
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0 , size_data - size_conv + 1 , conv_step):
            for j_focus in range(0 , size_data - size_conv + 1 , conv_step):
                focus = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(
                size_feature_map , size_feature_map)
            data_featuremap.append(featuremap)

        # expanding the data slice to one dimension
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap

    def pooling( self , featuremaps , size_pooling , pooling_type="average_pool"):
        # pooling process
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0 , size_map , size_pooling):
                for j_focus in range(0 , size_map , size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled , size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled
    def _expand( self , data):
        # expanding three dimension data to one dimension list
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1 , shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        data_expanded = np.asarray(data_expanded)
        return data_expanded

    def _expand_mat( self , data_mat):
        # expanding matrix to one dimension list
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        data_expanded = data_mat.reshape(1 , shapes[0] * shapes[1])
        return data_expanded

    def _calculate_gradient_from_pool( self , out_map , pd_pool , num_map , size_map , size_pooling):
        # calculate the gradient from the data slice of the pooling layer
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0 , size_map , size_pooling):
                for j in range(0 , size_map , size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(
                pd_conv1 , np.multiply(out_map[i_map] , (1 - out_map[i_map])))
            pd_all.append(pd_conv2)
        return pd_all
    def train( self , patterns , datas_train , datas_teach , n_repeat , error_accuracy , draw_e=bool):
        # model traning
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(F"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1 , data_conved1 = self.convolute(
                    data_train , self.conv1 , self.w_conv1 , self.thre_conv1 , conv_step=self.step_conv1 , )
                data_pooled1 = self.pooling(data_conved1 , self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input

                bp_net_j = np.dot(bp_out1 , self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2 , self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)
                # --------------Model Leaning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3) , np.multiply(bp_out3 , (1 - bp_out3)))
                pd_j_all = np.multiply(
                    np.dot(pd_k_all , self.wkj) , np.multiply(bp_out2 , (1 - bp_out2)))
                pd_i_all = np.dot(pd_j_all , self.vji)

                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1 , pd_conv1_pooled , shape_featuremap1[0] , shape_featuremap1[1] , self.size_pooling1 , )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list , data_focus1)
                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0]))
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # all connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print('   ----Teach      ',data_teach)
                # print('   ----BP_output  ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse , "+-")
            plt.plot(yplot , "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True , alpha=0.5)
            plt.show()

        print("------------------Training Complished---------------------")
        print((" - - Training epoch: ", rp, F" - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse
    def predict( self , datas_test):
        # model predict
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1 , data_conved1 = self.convolute(
                data_test , self.conv1 , self.w_conv1 , self.thre_conv1 , conv_step=self.step_conv1 , )
            data_pooled1 = self.pooling(data_conved1 , self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)

            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round , each)) for each in produce_out]
        return np.asarray(res)

    def convolution( self , data):
        # return the data of image after convoluting process so we can check it out
        data_test = np.asmatrix(data)
        data_focus1 , data_conved1 = self.convolute(
            data_test , self.conv1 , self.w_conv1 , self.thre_conv1 , conv_step=self.step_conv1 , )
        data_pooled1 = self.pooling(data_conved1 , self.size_pooling1)
        return data_conved1, data_pooled1
if __name__ == "__main__":
pass
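# --- Illustrative usage (a minimal sketch; shapes and hyper-parameters are
# made up, chosen so the layer sizes line up: a 14x14 input with a 3x3 kernel
# (stride 1) gives a 12x12 feature map, 2x2 pooling gives 6x6, and 4 kernels
# flatten to 4 * 36 = 144 inputs for the BP network) ---
# cnn = CNN([3, 4, 1], 2, 144, 30, 4, rate_w=0.2, rate_t=0.2)
# imgs = [np.random.rand(14, 14) for _ in range(8)]
# labels = [np.random.rand(4) for _ in range(8)]
# cnn.train(len(imgs), imgs, labels, n_repeat=5, error_accuracy=0.5, draw_e=False)
# preds = cnn.predict(imgs[:2])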
| 11 | 0 |
"""simple docstring"""
def kth_permutation(k , n ):
    """
    Finds k'th lexicographic permutation (in increasing order) of
    0, 1, 2, ..., n-1 in O(n^2) time.
    """
    # Factorials from 1! to (n-1)!
    factorials = [1]
    for i in range(2 , n ):
        factorials.append(factorials[-1] * i )
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n ) )

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number , k = divmod(k , factorial )
        permutation.append(elements[number] )
        elements.remove(elements[number] )

    permutation.append(elements[0] )

    return permutation
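
# Worked example: with n=3 the factorial stack is [1, 2]; for k=4,
# divmod(4, 2) selects element 2, then divmod(0, 1) selects element 0,
# and the remaining element 1 closes the permutation.
assert kth_permutation(4, 3) == [2, 0, 1]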
if __name__ == "__main__":
import doctest
doctest.testmod()
| 33 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()
def _match(qs , ks ):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple((re.compile(x + "$" ) for x in qs) )
    # Windows of len(qs) in sequence ks
    for i in range(len(ks ) - len(qs ) + 1 ):
        matches = [x.match(y ) for x, y in zip(qts , ks[i:] )]
        if matches and all(matches ):
            return True
    return False


def _replacement_rules(rules ):
    def replace(key , val ):
        for rule, replacement in rules:
            if _match(rule , key ):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp" , None )),
        (("transformer", "wte", "embedding"), P("mp" , None )),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None , "mp" )),
        (("attention", "out_proj", "kernel"), P("mp" , None )),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None , "mp" )),
        (("mlp", "c_fc", "bias"), P("mp" )),
        (("mlp", "c_proj", "kernel"), P("mp" , None )),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict ):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules )
    initd = {k: _unmatched for k in flatten_dict(in_dict )}
    result = {k: replace(k , v ) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result ) )
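# --- Illustrative usage (a minimal sketch; the parameter tree is made up and
# far smaller than a real GPT-2 checkpoint) ---
# import numpy as np
# params = {"transformer": {"wte": {"embedding": np.zeros((50257, 768))}}}
# specs = set_partitions(params)  # FrozenDict of PartitionSpec leaves
# # e.g. specs["transformer"]["wte"]["embedding"] == P("mp", None)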
| 11 | 0 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor ):
    model_input_names = ["pixel_values"]

    def __init__( self , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , do_pad : bool = True , pad_size : int = 8 , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale( self , image : np.ndarray , scale : float , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs ) -> np.ndarray:
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def pad( self , image : np.ndarray , size : int , data_format : Optional[Union[str, ChannelDimension]] = None ) -> np.ndarray:
        old_height , old_width = get_image_size(image )
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=data_format )

    def preprocess( self , images : ImageInput , do_rescale : Optional[bool] = None , rescale_factor : Optional[float] = None , do_pad : Optional[bool] = None , pad_size : Optional[int] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : Union[str, ChannelDimension] = ChannelDimension.FIRST , **kwargs , ) -> BatchFeature:
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images )

        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]

        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]

        if do_pad:
            images = [self.pad(image , size=pad_size ) for image in images]

        images = [to_channel_dimension_format(image , data_format ) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
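# --- Illustrative usage (a minimal sketch; the input shape is made up) ---
# import numpy as np
# processor = Swin2SRImageProcessor(pad_size=8)
# image = np.random.randint(0, 256, (3, 17, 20), dtype=np.uint8)
# batch = processor.preprocess(image, return_tensors="np")
# # 17 -> 24 and 20 -> 24: each side is padded to the *next* multiple of 8,
# # so under this formula even an exact multiple would gain one extra block.
# assert batch["pixel_values"].shape == (1, 3, 24, 24)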
| 34 |
def snake_to_camel_case(input_str : str , use_pascal : bool = False ) -> str:
    """
    Transforms a snake_case given string to camelCase (or PascalCase if indicated).
    """
    if not isinstance(input_str , str ):
        msg = f"Expected string as input, found {type(input_str )}"
        raise ValueError(msg )
    if not isinstance(use_pascal , bool ):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal )}"
        raise ValueError(msg )

    words = input_str.split("_" )

    start_index = 0 if use_pascal else 1

    words_to_capitalize = words[start_index:]

    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]

    initial_word = "" if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 11 | 0 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
"python",
"tqdm",
"regex",
"requests",
"packaging",
"filelock",
"numpy",
"tokenizers",
"huggingface-hub",
"safetensors",
"accelerate",
"pyyaml",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg , hint=None ):
    require_version(deps[pkg] , hint )
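# --- Illustrative usage (a minimal sketch) ---
# A caller that depends on an optional package can check it lazily:
# dep_version_check("tokenizers")  # raises if the installed version violates deps["tokenizers"]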
| 35 |
from __future__ import annotations
def fractional_knapsack(value : list[int] , weight : list[int] , capacity : int ):
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value , weight )]
    index.sort(key=lambda i: ratio[i] , reverse=True )

    max_value : float = 0
    fractions : list[float] = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
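
# Worked example: ratios are [1/3, 3/4, 1, 1]; items 2 and 3 fit whole
# (value 12, remaining capacity 3), then 3/4 of item 1 adds 3 * 3/4 = 2.25.
assert fractional_knapsack([1, 3, 5, 7], [3, 4, 5, 7], 15) == (14.25, [0, 0.75, 1, 1])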
if __name__ == "__main__":
import doctest
doctest.testmod()
| 11 | 0 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname ):
    stem = fname.split(os.path.sep )[-1]
    return re.search(r"^(.*)_\d+\.jpg$" , stem ).groups()[0]


class PetsDataset(Dataset ):
    def __init__( self , file_names , image_transform=None , label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__( self):
        return len(self.file_names)

    def __getitem__( self , idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config , args ):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir )
    else:
        accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"] )
    seed = int(config["seed"] )
    batch_size = int(config["batch_size"] )
    image_size = config["image_size"]
    if not isinstance(image_size , (list, tuple) ):
        image_size = (image_size, image_size)

    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps , "isdigit" ):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps )
        else:
            raise ValueError(
                F"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed." )
    else:
        checkpointing_steps = None

    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__ )[-1].split("." )[0]
        accelerator.init_trackers(run , config )

    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir , fname ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )]

    # Build the label correspondences
    all_labels = [extract_label(fname ) for fname in file_names]
    id_to_label = list(set(all_labels ) )
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label )}

    # Set the seed before splitting the data.
    np.random.seed(seed )
    torch.manual_seed(seed )
    torch.cuda.manual_seed_all(seed )

    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names ) )
    cut = int(0.8 * len(file_names ) )
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]
    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size , scale=(0.5, 1.0) ), ToTensor()] )
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split] , image_transform=train_tfm , label_to_id=label_to_id )

    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size ), ToTensor()] )
    eval_dataset = PetsDataset([file_names[i] for i in eval_split] , image_transform=eval_tfm , label_to_id=label_to_id )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset , shuffle=True , batch_size=batch_size , num_workers=4 )
    eval_dataloader = DataLoader(eval_dataset , shuffle=False , batch_size=batch_size , num_workers=4 )

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model("resnet50d" , pretrained=True , num_classes=len(label_to_id ) )

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )

    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True

    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device )
    std = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device )

    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )

    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer , max_lr=lr , epochs=num_epochs , steps_per_epoch=len(train_dataloader ) )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(F"Resumed from checkpoint: {args.resume_from_checkpoint}" )
            accelerator.load_state(args.resume_from_checkpoint )
            path = os.path.basename(args.resume_from_checkpoint )
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
            dirs.sort(key=os.path.getctime )
            path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path )[0]

        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_" , "" ) ) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_" , "" ) )
            starting_epoch = resume_step // len(train_dataloader )
            resume_step -= starting_epoch * len(train_dataloader )
    # Now we train the model
    for epoch in range(starting_epoch , num_epochs ):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader , resume_step )
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device ) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs )
            loss = torch.nn.functional.cross_entropy(outputs , batch["label"] )
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss )
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps , int ):
                output_dir = F"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir , output_dir )
                    accelerator.save_state(output_dir )
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device ) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs )
            predictions = outputs.argmax(dim=-1 )
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]) )
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()

        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(F"epoch {epoch}: {100 * eval_metric:.2f}" )
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader ),
                    "epoch": epoch,
                } , step=overall_step , )
        if checkpointing_steps == "epoch":
            output_dir = F"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir , output_dir )
            accelerator.save_state(output_dir )
accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script." )
    parser.add_argument("--data_dir" , required=True , help="The data folder on disk." )
    parser.add_argument("--fp16" , action="store_true" , help="If passed, will use FP16 training." )
    parser.add_argument(
        "--mixed_precision" , type=str , default=None , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU." , )
    parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
    parser.add_argument(
        "--checkpointing_steps" , type=str , default=None , help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch." , )
    parser.add_argument(
        "--output_dir" , type=str , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
    parser.add_argument(
        "--resume_from_checkpoint" , type=str , default=None , help="If the training should continue from a checkpoint folder." , )
    parser.add_argument(
        "--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , )
    parser.add_argument(
        "--project_dir" , type=str , default="logs" , help="Location on where to store experiment tracking logs` and relevent project information" , )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config , args )
if __name__ == "__main__":
main()
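# --- Illustrative invocation (paths are hypothetical) ---
# python cv_example.py --data_dir ./images --mixed_precision fp16 \
#     --checkpointing_steps epoch --output_dir ./checkpoints --with_tracking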
| 36 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__( self , *args , **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs)
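# --- Illustrative usage (a minimal sketch) ---
# The deprecated alias still constructs a working processor but warns:
# import warnings
# with warnings.catch_warnings(record=True) as caught:
#     warnings.simplefilter("always")
#     BeitFeatureExtractor()
# assert any(issubclass(w.category, FutureWarning) for w in caught)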
| 11 | 0 |
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"] )
    repo = g.get_repo("huggingface/transformers" )
    open_issues = repo.get_issues(state="open" )

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i: i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
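# --- Illustrative scheduling (an assumption; not part of this script) ---
# A stale bot like this is typically wired to a cron-style CI job, e.g. a
# GitHub Actions step that exports the token and runs the script:
#   GITHUB_TOKEN=<repo token> python scripts/stale.py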
| 37 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__( self , parent , batch_size=13 , image_size=32 , patch_size=2 , num_channels=3 , embed_dim=16 , depths=[1, 2, 1] , num_heads=[2, 2, 4] , window_size=2 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , patch_norm=True , initializer_range=0.02 , layer_norm_eps=1e-5 , is_training=True , scope=None , use_labels=True , type_sequence_label_size=10 , encoder_stride=8 , out_features=["stage1", "stage2", "stage3"] , out_indices=[1, 2, 3] , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs( self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config( self):
        return MaskFormerSwinConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )

    def create_and_check_model( self , config , pixel_values , labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone( self , config , pixel_values , labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape) , [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels) , len(config.out_features))
        self.parent.assertListEqual(model.channels , [16, 32, 64])

        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common( self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def _lowerCamelCase ( self) -> str:
_A : Union[str, Any] = MaskFormerSwinModelTester(self)
_A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , embed_dim=3_7)
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
" `nn.DataParallel`"
))
def _lowerCamelCase ( self) -> Union[str, Any]:
pass
    def test_config( self) -> int:
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self) -> str:
        return
    def test_model( self) -> List[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_backbone( self) -> Union[str, Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
@unittest.skip("Swin does not use inputs_embeds")
def _lowerCamelCase ( self) -> str:
pass
@unittest.skip("Swin does not support feedforward chunking")
def _lowerCamelCase ( self) -> List[Any]:
pass
    def test_model_common_attributes( self) -> Optional[int]:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear))
    def test_forward_signature( self) -> Any:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names)
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
def _lowerCamelCase ( self) -> Tuple:
pass
@unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
def _lowerCamelCase ( self) -> str:
pass
    def check_hidden_states_output( self , inputs_dict , config , model_class , image_size) -> Optional[int]:
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict , model_class))
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths) + 1)
        self.assertEqual(len(hidden_states) , expected_num_layers)
        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
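    # e.g. a (32, 32) input with a (4, 4) patch size gives (32 // 4) * (32 // 4) = 64
    # patches, so hidden_states[0] has shape (batch_size, 64, embed_dim).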
    def test_hidden_states_output( self) -> Dict:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size)
    def test_hidden_states_output_with_padding( self) -> Tuple:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width))
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width))
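    # The padding arithmetic above rounds up past the next multiple of the patch
    # size, e.g. image_size=(30, 30) with patch_size=(4, 4) pads to 30 + 4 - (30 % 4) = 32.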
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
def _lowerCamelCase ( self) -> List[str]:
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
def _lowerCamelCase ( self) -> List[str]:
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
def _lowerCamelCase ( self) -> str:
pass
    def test_model_outputs_equivalence( self) -> Optional[Any]:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        def set_nan_tensor_to_zero(t):
            # NaN is the only value that is not equal to itself
            t[t != t] = 0
            return t
        def check_equivalence(model , tuple_inputs , dict_inputs , additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs , return_dict=False , **additional_kwargs)
                dict_output = model(**dict_inputs , return_dict=True , **additional_kwargs).to_tuple()
                def recursive_check(tuple_object , dict_object):
                    if isinstance(tuple_object , (List, Tuple)):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object , dict_object):
                            recursive_check(tuple_iterable_value , dict_iterable_value)
                    elif isinstance(tuple_object , Dict):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values() , dict_object.values()):
                            recursive_check(tuple_iterable_value , dict_iterable_value)
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object) , set_nan_tensor_to_zero(dict_object) , atol=1e-5) , msg=(
                                "Tuple and dict output are not equal. Difference:"
                                F" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                                F" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                                F" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                            ) , )
                recursive_check(tuple_output , dict_output)
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class)
            dict_inputs = self._prepare_for_class(inputs_dict , model_class)
            check_equivalence(model , tuple_inputs , dict_inputs)
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True)
            check_equivalence(model , tuple_inputs , dict_inputs)
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class)
            dict_inputs = self._prepare_for_class(inputs_dict , model_class)
            check_equivalence(model , tuple_inputs , dict_inputs , {"output_hidden_states": True})
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True)
            check_equivalence(model , tuple_inputs , dict_inputs , {"output_hidden_states": True})
@require_torch
class lowerCAmelCase__ ( unittest.TestCase , BackboneTesterMixin):
'''simple docstring'''
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig
    def setUp( self) -> Optional[Any]:
        self.model_tester = MaskFormerSwinModelTester(self)
    def test_backbone_outputs( self) -> Optional[Any]:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["pixel_values"].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()
            outputs = backbone(**inputs_dict)
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps , tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels):
                self.assertEqual(feature_map.shape[:2] , (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)
            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict , output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertEqual(len(outputs.hidden_states) , len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels))
            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict , output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
| 11 | 0 |
def solution(power: int = 1000) -> int:
    """simple docstring"""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num
if __name__ == "__main__":
    power = int(input('''Enter the power of 2: ''').strip())
    print('''2 ^ ''', power, ''' = ''', 2**power)
    result = solution(power)
    print('''Sum of the digits is: ''', result)
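# Illustrative check of solution() (a hand-verifiable case): 2**15 = 32768 and
# 3 + 2 + 7 + 6 + 8 = 26.
assert solution(15) == 26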
| 38 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_speecht5'] = ['SpeechT5Tokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_speecht5'] = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
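# Hedged usage sketch (not part of this module): with the _LazyModule registered
# above, a downstream `from transformers import SpeechT5Config` only imports
# configuration_speecht5 on first attribute access, keeping `import transformers`
# cheap. The class name is taken from the import structure declared above.
#
#     from transformers import SpeechT5Config
#     config = SpeechT5Config()  # materializes the lazy import on first use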
| 11 | 0 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __lowerCamelCase ( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase):
"""simple docstring"""
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        []) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=True , )
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=False , set_alpha_to_one=False , )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=False , set_alpha_to_zero=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'inverse_scheduler': inverse_scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        """simple docstring"""
        mask = floats_tensor((1, 16, 16) , rng=random.Random(seed ) ).to(device )
        latents = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'a dog and a newt',
            'mask_image': mask,
            'image_latents': latents,
            'generator': generator,
            'num_inference_steps': 2,
            'inpaint_strength': 1.0,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
    def get_dummy_mask_inputs( self , device , seed=0 ):
        """simple docstring"""
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert('RGB' )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'image': image,
            'source_prompt': 'a cat and a frog',
            'target_prompt': 'a dog and a newt',
            'generator': generator,
            'num_inference_steps': 2,
            'num_maps_per_mask': 2,
            'mask_encode_strength': 1.0,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
    def get_dummy_inversion_inputs( self , device , seed=0 ):
        """simple docstring"""
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert('RGB' )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'image': image,
            'prompt': 'a cat and a frog',
            'generator': generator,
            'num_inference_steps': 2,
            'inpaint_strength': 1.0,
            'guidance_scale': 6.0,
            'decode_latents': True,
            'output_type': 'numpy',
        }
        return inputs
    def test_save_load_optional_components( self ):
        """simple docstring"""
        if not hasattr(self.pipeline_class , '_optional_components' ):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe , optional_component , None )
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
        inputs = self.get_dummy_inputs(torch_device )
        output = pipe(**inputs )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir )
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir )
            pipe_loaded.to(torch_device )
            pipe_loaded.set_progress_bar_config(disable=None )
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded , optional_component ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , )
        inputs = self.get_dummy_inputs(torch_device )
        output_loaded = pipe_loaded(**inputs )[0]
        max_diff = np.abs(output - output_loaded ).max()
        self.assertLess(max_diff , 1e-4 )
    def test_mask( self ):
        """simple docstring"""
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_mask_inputs(device )
        mask = pipe.generate_mask(**inputs )
        mask_slice = mask[0, -3:, -3:]
        self.assertEqual(mask.shape , (1, 16, 16) )
        expected_slice = np.array([0] * 9 )
        max_diff = np.abs(mask_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1e-3 )
        self.assertEqual(mask[0, -3, -4] , 0 )
    def test_inversion( self ):
        """simple docstring"""
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inversion_inputs(device )
        image = pipe.invert(**inputs ).images
        image_slice = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape , (2, 32, 32, 3) )
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799] , )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1e-3 )
    def test_inference_batch_single_identical( self ):
        """simple docstring"""
        super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
    def test_inversion_dpm( self ):
        """simple docstring"""
        device = 'cpu'
        components = self.get_dummy_components()
        scheduler_args = {'beta_start': 0.00085, 'beta_end': 0.012, 'beta_schedule': 'scaled_linear'}
        components['scheduler'] = DPMSolverMultistepScheduler(**scheduler_args )
        components['inverse_scheduler'] = DPMSolverMultistepInverseScheduler(**scheduler_args )
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inversion_inputs(device )
        image = pipe.invert(**inputs ).images
        image_slice = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape , (2, 32, 32, 3) )
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799] , )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1e-3 )
@require_torch_gpu
@slow
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
    def tearDown( self ):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @classmethod
    def setUpClass( cls ):
        """simple docstring"""
        raw_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' )
        raw_image = raw_image.convert('RGB' ).resize((768, 768) )
        cls.raw_image = raw_image
    def test_stable_diffusion_diffedit_full( self ):
        """simple docstring"""
        generator = torch.manual_seed(0 )
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            'stabilityai/stable-diffusion-2-1' , safety_checker=None , torch_dtype=torch.float16 )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config )
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None )
        source_prompt = 'a bowl of fruit'
        target_prompt = 'a bowl of pears'
        mask_image = pipe.generate_mask(
            image=self.raw_image , source_prompt=source_prompt , target_prompt=target_prompt , generator=generator , )
        inv_latents = pipe.invert(
            prompt=source_prompt , image=self.raw_image , inpaint_strength=0.7 , generator=generator ).latents
        image = pipe(
            prompt=target_prompt , mask_image=mask_image , image_latents=inv_latents , generator=generator , negative_prompt=source_prompt , inpaint_strength=0.7 , output_type='numpy' , ).images[0]
        expected_image = (
            np.array(
                load_image(
                    'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
                    '/diffedit/pears.png' ).resize((768, 768) ) )
            / 255
        )
        assert np.abs((expected_image - image).max() ) < 5e-1
    def test_stable_diffusion_diffedit_dpm( self ):
        """simple docstring"""
        generator = torch.manual_seed(0 )
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            'stabilityai/stable-diffusion-2-1' , safety_checker=None , torch_dtype=torch.float16 )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None )
        source_prompt = 'a bowl of fruit'
        target_prompt = 'a bowl of pears'
        mask_image = pipe.generate_mask(
            image=self.raw_image , source_prompt=source_prompt , target_prompt=target_prompt , generator=generator , )
        inv_latents = pipe.invert(
            prompt=source_prompt , image=self.raw_image , inpaint_strength=0.7 , generator=generator , num_inference_steps=25 , ).latents
        image = pipe(
            prompt=target_prompt , mask_image=mask_image , image_latents=inv_latents , generator=generator , negative_prompt=source_prompt , inpaint_strength=0.7 , num_inference_steps=25 , output_type='numpy' , ).images[0]
        expected_image = (
            np.array(
                load_image(
                    'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
                    '/diffedit/pears.png' ).resize((768, 768) ) )
            / 255
        )
        assert np.abs((expected_image - image).max() ) < 5e-1
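    # Condensed sketch of the DiffEdit flow the two slow tests exercise
    # (checkpoint and API calls reused from the tests above; runtime flags such
    # as the dtype are assumptions, not requirements):
    #
    #     pipe = StableDiffusionDiffEditPipeline.from_pretrained(
    #         'stabilityai/stable-diffusion-2-1' , safety_checker=None , torch_dtype=torch.float16 )
    #     pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config )
    #     pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config )
    #     mask = pipe.generate_mask(image=image , source_prompt=src , target_prompt=tgt )
    #     latents = pipe.invert(prompt=src , image=image , inpaint_strength=0.7 ).latents
    #     edited = pipe(prompt=tgt , mask_image=mask , image_latents=latents , inpaint_strength=0.7 ).images[0]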
| 39 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] )
    image = np.array(image ).astype(np.float32 ) / 255.0
    image = image[None].transpose(0 , 3 , 1 , 2 )
    image = torch.from_numpy(image )
    return 2.0 * image - 1.0
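# Example of the rounding above: a 500x316 PIL image is resized to 480x288
# (500 - 500 % 32 = 480, 316 - 316 % 32 = 288), so preprocess returns a float
# tensor of shape (1, 3, 288, 480) with values scaled to [-1.0, 1.0].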
class LDMSuperResolutionPipeline(DiffusionPipeline):
    '''simple docstring'''
    def __init__( self , vqvae , unet , scheduler , ) -> Optional[int]:
        super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler)
    @torch.no_grad()
    def __call__( self , image = None , batch_size = 1 , num_inference_steps = 100 , eta = 0.0 , generator = None , output_type = "pil" , return_dict = True , ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image , PIL.Image.Image):
            batch_size = 1
        elif isinstance(image , torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")
        if isinstance(image , PIL.Image.Image):
            image = preprocess(image)
        height, width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype
        latents = randn_tensor(latents_shape , generator=generator , device=self.device , dtype=latents_dtype)
        image = image.to(device=self.device , dtype=latents_dtype)
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps , device=self.device)
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image] , dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input , t)
            # predict the noise residual
            noise_pred = self.unet(latents_input , t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_kwargs).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image , -1.0 , 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0 , 2 , 3 , 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
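# Hedged usage sketch (the public pipeline name and checkpoint are assumptions
# based on the diffusers API, not taken from this file):
#
#     from diffusers import LDMSuperResolutionPipeline
#     pipe = LDMSuperResolutionPipeline.from_pretrained(
#         "CompVis/ldm-super-resolution-4x-openimages"
#     )
#     upscaled = pipe(image=low_res_pil_image, num_inference_steps=100, eta=1.0).images[0]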
| 11 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_mobilebert""": [
"""MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""MobileBertConfig""",
"""MobileBertOnnxConfig""",
],
"""tokenization_mobilebert""": ["""MobileBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["""MobileBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
"""MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileBertForMaskedLM""",
"""MobileBertForMultipleChoice""",
"""MobileBertForNextSentencePrediction""",
"""MobileBertForPreTraining""",
"""MobileBertForQuestionAnswering""",
"""MobileBertForSequenceClassification""",
"""MobileBertForTokenClassification""",
"""MobileBertLayer""",
"""MobileBertModel""",
"""MobileBertPreTrainedModel""",
"""load_tf_weights_in_mobilebert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
"""TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileBertForMaskedLM""",
"""TFMobileBertForMultipleChoice""",
"""TFMobileBertForNextSentencePrediction""",
"""TFMobileBertForPreTraining""",
"""TFMobileBertForQuestionAnswering""",
"""TFMobileBertForSequenceClassification""",
"""TFMobileBertForTokenClassification""",
"""TFMobileBertMainLayer""",
"""TFMobileBertModel""",
"""TFMobileBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 40 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class lowerCAmelCase__ (ModelTesterMixin , UNetTesterMixin , unittest.TestCase):
'''simple docstring'''
    model_class = VQModel
    main_input_name = "sample"
    @property
    def dummy_input( self , sizes=(32, 32)) -> Optional[Any]:
        batch_size = 4
        num_channels = 3
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}
    @property
    def input_shape( self) -> int:
        return (3, 32, 32)
    @property
    def output_shape( self) -> List[Any]:
        return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common( self) -> Union[str, Any]:
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_forward_signature( self) -> Union[str, Any]:
        pass
    def test_training( self) -> Any:
        pass
    def test_from_pretrained_hub( self) -> Any:
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]) , 0)
        model.to(torch_device)
        image = model(**self.dummy_input)
        assert image is not None, "Make sure output is not None"
    def test_output_pretrained( self) -> Union[str, Any]:
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()
        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)
        image = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice , expected_output_slice , atol=1e-3))
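    # Minimal round-trip sketch of what the tests above check (the config mirrors
    # the dummy init dict; VQModel preserves the spatial shape end to end):
    #
    #     model = VQModel(block_out_channels=[32, 64], in_channels=3, out_channels=3,
    #                     down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
    #                     up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=3)
    #     sample = torch.randn(1, 3, 32, 32)
    #     with torch.no_grad():
    #         assert model(sample).sample.shape == sample.shape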
| 11 | 0 |