Columns:
  code                     string  (lengths 87 - 55.2k)
  code_codestyle           int64   (0 - 349)
  style_context            string  (lengths 135 - 49.1k)
  style_context_codestyle  int64   (0 - 349)
  label                    int64   (0 - 1)
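Each row pairs a `code` snippet and its numeric style id with a longer `style_context` snippet, that context's style id, and a binary `label`. Below is a minimal sketch of inspecting one row with the Hugging Face `datasets` library; the repository id used here is a placeholder assumption, not the dataset's actual name.

# Minimal sketch of reading one row of this dataset.
# Assumption: "username/code-style-pairs" is a hypothetical repo id, not the real one.
from datasets import load_dataset

ds = load_dataset("username/code-style-pairs", split="train")  # hypothetical id
row = ds[0]
print(row["code"][:200])               # code snippet (string, 87 - 55.2k chars)
print(row["code_codestyle"])           # style id of the snippet (int, 0 - 349)
print(row["style_context"][:200])      # context snippet (string, 135 - 49.1k chars)
print(row["style_context_codestyle"])  # style id of the context (int, 0 - 349)
print(row["label"])                    # binary label (int, 0 or 1)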
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n using trial division."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    max_number = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            max_number = i
            n //= i
        i += 1
    if n > 1:
        max_number = n
    return int(max_number)


if __name__ == "__main__":
    print(f"{solution() = }")
code_codestyle: 15
import copy import tempfile import unittest from huggingface_hub import HfFolder, delete_repo from parameterized import parameterized from requests.exceptions import HTTPError from transformers import AutoConfig, GenerationConfig from transformers.testing_utils import TOKEN, USER, is_staging_test class lowerCAmelCase__ ( unittest.TestCase): '''simple docstring''' @parameterized.expand([(None,), ("foo.json",)]) def _lowerCamelCase ( self , __lowerCamelCase) -> List[str]: _A : str = GenerationConfig( do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(__lowerCamelCase , config_name=__lowerCamelCase) _A : Tuple = GenerationConfig.from_pretrained(__lowerCamelCase , config_name=__lowerCamelCase) # Checks parameters that were specified self.assertEqual(loaded_config.do_sample , __lowerCamelCase) self.assertEqual(loaded_config.temperature , 0.7) self.assertEqual(loaded_config.length_penalty , 1.0) self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]]) # Checks parameters that were not specified (defaults) self.assertEqual(loaded_config.top_k , 5_0) self.assertEqual(loaded_config.max_length , 2_0) self.assertEqual(loaded_config.max_time , __lowerCamelCase) def _lowerCamelCase ( self) -> Optional[int]: _A : Optional[int] = AutoConfig.from_pretrained("gpt2") _A : int = GenerationConfig.from_model_config(__lowerCamelCase) _A : List[Any] = GenerationConfig() # The generation config has loaded a few non-default parameters from the model config self.assertNotEqual(__lowerCamelCase , __lowerCamelCase) # One of those parameters is eos_token_id -- check if it matches self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id) self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id) def _lowerCamelCase ( self) -> Optional[Any]: _A : Optional[Any] = GenerationConfig() _A : List[Any] = { "max_new_tokens": 1_0_2_4, "foo": "bar", } _A : List[str] = copy.deepcopy(__lowerCamelCase) _A : int = generation_config.update(**__lowerCamelCase) # update_kwargs was not modified (no side effects) self.assertEqual(__lowerCamelCase , __lowerCamelCase) # update_kwargs was used to update the config on valid attributes self.assertEqual(generation_config.max_new_tokens , 1_0_2_4) # `.update()` returns a dictionary of unused kwargs self.assertEqual(__lowerCamelCase , {"foo": "bar"}) def _lowerCamelCase ( self) -> Any: _A : int = GenerationConfig() _A : int = "bar" with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(__lowerCamelCase) _A : Any = GenerationConfig.from_pretrained(__lowerCamelCase) # update_kwargs was used to update the config on valid attributes self.assertEqual(new_config.foo , "bar") _A : Optional[Any] = GenerationConfig.from_model_config(__lowerCamelCase) assert not hasattr(__lowerCamelCase , "foo") # no new kwargs should be initialized if from config def _lowerCamelCase ( self) -> List[str]: _A : Union[str, Any] = GenerationConfig() self.assertEqual(default_config.temperature , 1.0) self.assertEqual(default_config.do_sample , __lowerCamelCase) self.assertEqual(default_config.num_beams , 1) _A : Optional[int] = GenerationConfig( do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) self.assertEqual(config.temperature , 0.7) self.assertEqual(config.do_sample , __lowerCamelCase) 
self.assertEqual(config.num_beams , 1) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(__lowerCamelCase) _A : Optional[int] = GenerationConfig.from_pretrained(__lowerCamelCase , temperature=1.0) self.assertEqual(loaded_config.temperature , 1.0) self.assertEqual(loaded_config.do_sample , __lowerCamelCase) self.assertEqual(loaded_config.num_beams , 1) # default value @is_staging_test class lowerCAmelCase__ ( unittest.TestCase): '''simple docstring''' @classmethod def _lowerCamelCase ( cls) -> Optional[int]: _A : Dict = TOKEN HfFolder.save_token(__lowerCamelCase) @classmethod def _lowerCamelCase ( cls) -> List[Any]: try: delete_repo(token=cls._token , repo_id="test-generation-config") except HTTPError: pass try: delete_repo(token=cls._token , repo_id="valid_org/test-generation-config-org") except HTTPError: pass def _lowerCamelCase ( self) -> Any: _A : Optional[int] = GenerationConfig( do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub("test-generation-config" , use_auth_token=self._token) _A : Union[str, Any] = GenerationConfig.from_pretrained(F"{USER}/test-generation-config") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase)) # Reset repo delete_repo(token=self._token , repo_id="test-generation-config") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( __lowerCamelCase , repo_id="test-generation-config" , push_to_hub=__lowerCamelCase , use_auth_token=self._token) _A : Optional[Any] = GenerationConfig.from_pretrained(F"{USER}/test-generation-config") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase)) def _lowerCamelCase ( self) -> Union[str, Any]: _A : Union[str, Any] = GenerationConfig( do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub("valid_org/test-generation-config-org" , use_auth_token=self._token) _A : int = GenerationConfig.from_pretrained("valid_org/test-generation-config-org") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase)) # Reset repo delete_repo(token=self._token , repo_id="valid_org/test-generation-config-org") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( __lowerCamelCase , repo_id="valid_org/test-generation-config-org" , push_to_hub=__lowerCamelCase , use_auth_token=self._token) _A : Optional[int] = GenerationConfig.from_pretrained("valid_org/test-generation-config-org") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
style_context_codestyle: 11
label: 0
# Lint as: python3
import itertools
import os
import re


_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")
_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")
_split_re = r"^\w+(\.\w+)*$"
INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert a camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert a snake-case string to camel-case."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)
    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
code_codestyle: 302
import pickle import numpy as np from matplotlib import pyplot as plt class lowerCAmelCase__ : '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=0.2 , __lowerCamelCase=0.2) -> str: _A : Optional[int] = bp_numa _A : Dict = bp_numa _A : Tuple = bp_numa _A : List[str] = conva_get[:2] _A : Tuple = conva_get[2] _A : Optional[int] = size_pa _A : Optional[Any] = rate_w _A : Optional[Any] = rate_t _A : Union[str, Any] = [ np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0]) + 0.5) for i in range(self.conva[1]) ] _A : int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5) _A : Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5) _A : Any = -2 * np.random.rand(self.conva[1]) + 1 _A : Optional[int] = -2 * np.random.rand(self.num_bpa) + 1 _A : Optional[Any] = -2 * np.random.rand(self.num_bpa) + 1 def _lowerCamelCase ( self , __lowerCamelCase) -> Dict: # save model dict with pickle _A : Dict = { "num_bp1": self.num_bpa, "num_bp2": self.num_bpa, "num_bp3": self.num_bpa, "conv1": self.conva, "step_conv1": self.step_conva, "size_pooling1": self.size_poolinga, "rate_weight": self.rate_weight, "rate_thre": self.rate_thre, "w_conv1": self.w_conva, "wkj": self.wkj, "vji": self.vji, "thre_conv1": self.thre_conva, "thre_bp2": self.thre_bpa, "thre_bp3": self.thre_bpa, } with open(__lowerCamelCase , "wb") as f: pickle.dump(__lowerCamelCase , __lowerCamelCase) print(F"Model saved: {save_path}") @classmethod def _lowerCamelCase ( cls , __lowerCamelCase) -> Any: # read saved model with open(__lowerCamelCase , "rb") as f: _A : Any = pickle.load(__lowerCamelCase) # noqa: S301 _A : Optional[int] = model_dic.get("conv1") conv_get.append(model_dic.get("step_conv1")) _A : str = model_dic.get("size_pooling1") _A : List[str] = model_dic.get("num_bp1") _A : Union[str, Any] = model_dic.get("num_bp2") _A : List[Any] = model_dic.get("num_bp3") _A : Dict = model_dic.get("rate_weight") _A : List[Any] = model_dic.get("rate_thre") # create model instance _A : str = CNN(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) # modify model parameter _A : List[Any] = model_dic.get("w_conv1") _A : Union[str, Any] = model_dic.get("wkj") _A : str = model_dic.get("vji") _A : List[str] = model_dic.get("thre_conv1") _A : Optional[Any] = model_dic.get("thre_bp2") _A : Dict = model_dic.get("thre_bp3") return conv_ins def _lowerCamelCase ( self , __lowerCamelCase) -> Dict: return 1 / (1 + np.exp(-1 * x)) def _lowerCamelCase ( self , __lowerCamelCase) -> Union[str, Any]: return round(__lowerCamelCase , 3) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Union[str, Any]: # convolution process _A : Tuple = convs[0] _A : Union[str, Any] = convs[1] _A : List[Any] = np.shape(__lowerCamelCase)[0] # get the data slice of original image data, data_focus _A : Tuple = [] for i_focus in range(0 , size_data - size_conv + 1 , __lowerCamelCase): for j_focus in range(0 , size_data - size_conv + 1 , __lowerCamelCase): _A : Optional[int] = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(__lowerCamelCase) # calculate the feature map of every single kernel, and saved as list of matrix _A : Optional[Any] = [] _A : Optional[int] = int((size_data - size_conv) / conv_step + 1) for i_map in range(__lowerCamelCase): _A : Optional[int] = [] 
for i_focus in range(len(__lowerCamelCase)): _A : Any = ( np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map])) - thre_convs[i_map] ) featuremap.append(self.sig(__lowerCamelCase)) _A : Optional[Any] = np.asmatrix(__lowerCamelCase).reshape( __lowerCamelCase , __lowerCamelCase) data_featuremap.append(__lowerCamelCase) # expanding the data slice to One dimenssion _A : Optional[Any] = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(__lowerCamelCase)) _A : Dict = np.asarray(__lowerCamelCase) return focus_list, data_featuremap def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase="average_pool") -> Dict: # pooling process _A : Optional[Any] = len(featuremaps[0]) _A : str = int(size_map / size_pooling) _A : Optional[int] = [] for i_map in range(len(__lowerCamelCase)): _A : int = featuremaps[i_map] _A : Optional[int] = [] for i_focus in range(0 , __lowerCamelCase , __lowerCamelCase): for j_focus in range(0 , __lowerCamelCase , __lowerCamelCase): _A : str = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(__lowerCamelCase)) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(__lowerCamelCase)) _A : Tuple = np.asmatrix(__lowerCamelCase).reshape(__lowerCamelCase , __lowerCamelCase) featuremap_pooled.append(__lowerCamelCase) return featuremap_pooled def _lowerCamelCase ( self , __lowerCamelCase) -> Tuple: # expanding three dimension data to one dimension list _A : Tuple = [] for i in range(len(__lowerCamelCase)): _A : Union[str, Any] = np.shape(data[i]) _A : List[Any] = data[i].reshape(1 , shapes[0] * shapes[1]) _A : Optional[Any] = data_listed.getA().tolist()[0] data_expanded.extend(__lowerCamelCase) _A : Optional[Any] = np.asarray(__lowerCamelCase) return data_expanded def _lowerCamelCase ( self , __lowerCamelCase) -> Union[str, Any]: # expanding matrix to one dimension list _A : List[Any] = np.asarray(__lowerCamelCase) _A : Union[str, Any] = np.shape(__lowerCamelCase) _A : Dict = data_mat.reshape(1 , shapes[0] * shapes[1]) return data_expanded def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Optional[int]: _A : Dict = [] _A : Any = 0 for i_map in range(__lowerCamelCase): _A : Union[str, Any] = np.ones((size_map, size_map)) for i in range(0 , __lowerCamelCase , __lowerCamelCase): for j in range(0 , __lowerCamelCase , __lowerCamelCase): _A : List[Any] = pd_pool[ i_pool ] _A : Tuple = i_pool + 1 _A : Optional[Any] = np.multiply( __lowerCamelCase , np.multiply(out_map[i_map] , (1 - out_map[i_map]))) pd_all.append(__lowerCamelCase) return pd_all def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=bool) -> Union[str, Any]: # model traning print("----------------------Start Training-------------------------") print((" - - Shape: Train_Data ", np.shape(__lowerCamelCase))) print((" - - Shape: Teach_Data ", np.shape(__lowerCamelCase))) _A : Tuple = 0 _A : Dict = [] _A : Optional[Any] = 1_0_0_0_0 while rp < n_repeat and mse >= error_accuracy: _A : Union[str, Any] = 0 print(F"-------------Learning Time {rp}--------------") for p in range(len(__lowerCamelCase)): # print('------------Learning Image: %d--------------'%p) _A : str = np.asmatrix(datas_train[p]) _A : Union[str, Any] = np.asarray(datas_teach[p]) _A , _A : Any = self.convolute( __lowerCamelCase , 
self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) _A : Optional[Any] = self.pooling(__lowerCamelCase , self.size_poolinga) _A : Optional[int] = np.shape(__lowerCamelCase) _A : List[str] = self._expand(__lowerCamelCase) _A : Tuple = data_bp_input _A : int = np.dot(__lowerCamelCase , self.vji.T) - self.thre_bpa _A : List[Any] = self.sig(__lowerCamelCase) _A : Union[str, Any] = np.dot(__lowerCamelCase , self.wkj.T) - self.thre_bpa _A : List[str] = self.sig(__lowerCamelCase) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- _A : int = np.multiply( (data_teach - bp_outa) , np.multiply(__lowerCamelCase , (1 - bp_outa))) _A : Optional[Any] = np.multiply( np.dot(__lowerCamelCase , self.wkj) , np.multiply(__lowerCamelCase , (1 - bp_outa))) _A : Union[str, Any] = np.dot(__lowerCamelCase , self.vji) _A : Any = pd_i_all / (self.size_poolinga * self.size_poolinga) _A : Dict = pd_conva_pooled.T.getA().tolist() _A : Optional[Any] = self._calculate_gradient_from_pool( __lowerCamelCase , __lowerCamelCase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , ) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1]): _A : int = self._expand_mat(pd_conva_all[k_conv]) _A : Optional[int] = self.rate_weight * np.dot(__lowerCamelCase , __lowerCamelCase) _A : List[Any] = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0])) _A : Any = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv]) * self.rate_thre ) # all connected layer _A : Tuple = self.wkj + pd_k_all.T * bp_outa * self.rate_weight _A : int = self.vji + pd_j_all.T * bp_outa * self.rate_weight _A : Tuple = self.thre_bpa - pd_k_all * self.rate_thre _A : List[str] = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image _A : Optional[int] = np.sum(abs(data_teach - bp_outa)) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) _A : Any = rp + 1 _A : Dict = error_count / patterns all_mse.append(__lowerCamelCase) def draw_error(): _A : Optional[int] = [error_accuracy for i in range(int(n_repeat * 1.2))] plt.plot(__lowerCamelCase , "+-") plt.plot(__lowerCamelCase , "r--") plt.xlabel("Learning Times") plt.ylabel("All_mse") plt.grid(__lowerCamelCase , alpha=0.5) plt.show() print("------------------Training Complished---------------------") print((" - - Training epoch: ", rp, F" - - Mse: {mse:.6f}")) if draw_e: draw_error() return mse def _lowerCamelCase ( self , __lowerCamelCase) -> int: # model predict _A : Union[str, Any] = [] print("-------------------Start Testing-------------------------") print((" - - Shape: Test_Data ", np.shape(__lowerCamelCase))) for p in range(len(__lowerCamelCase)): _A : int = np.asmatrix(datas_test[p]) _A , _A : List[Any] = self.convolute( __lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) _A : str = self.pooling(__lowerCamelCase , self.size_poolinga) _A : Optional[int] = self._expand(__lowerCamelCase) _A : List[Any] = data_bp_input _A : Optional[int] = bp_outa * self.vji.T - self.thre_bpa _A : int = self.sig(__lowerCamelCase) _A : int = bp_outa * self.wkj.T - self.thre_bpa _A : Optional[int] = self.sig(__lowerCamelCase) produce_out.extend(bp_outa.getA().tolist()) _A : int = [list(map(self.do_round , __lowerCamelCase)) for each in produce_out] return np.asarray(__lowerCamelCase) def _lowerCamelCase ( self , __lowerCamelCase) -> Dict: # return the data of image 
after convoluting process so we can check it out _A : Optional[int] = np.asmatrix(__lowerCamelCase) _A , _A : Tuple = self.convolute( __lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) _A : Union[str, Any] = self.pooling(__lowerCamelCase , self.size_poolinga) return data_conveda, data_pooleda if __name__ == "__main__": pass
style_context_codestyle: 11
label: 0
"""simple docstring""" from maths.is_square_free import is_square_free from maths.prime_factors import prime_factors def _snake_case ( lowerCamelCase__ : int ) -> Optional[Any]: lowerCamelCase_ : Optional[int] =prime_factors(UpperCamelCase__ ) if is_square_free(UpperCamelCase__ ): return -1 if len(UpperCamelCase__ ) % 2 else 1 return 0 if __name__ == "__main__": import doctest doctest.testmod()
code_codestyle: 144
import re

from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P


# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if the regexes in qs fully match some window of the key tuple ks."""
    qts = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
style_context_codestyle: 11
label: 0
def nor_gate(input_1: int, input_2: int) -> int:
    """Return 1 only when both inputs are 0 (logical NOR)."""
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"| 0 | 0 | {nor_gate(0, 0)} |")
    print(f"| 0 | 1 | {nor_gate(0, 1)} |")
    print(f"| 1 | 0 | {nor_gate(1, 0)} |")
    print(f"| 1 | 1 | {nor_gate(1, 1)} |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
code_codestyle: 193
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """Convert a snake_case string to camelCase, or to PascalCase if use_pascal is True."""
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])


if __name__ == "__main__":
    from doctest import testmod

    testmod()
style_context_codestyle: 11
label: 0
'''simple docstring''' import logging import os from typing import List, Tuple import numpy as np import psutil import torch import torch.distributed as dist from transformers import RagRetriever __SCREAMING_SNAKE_CASE :List[str] = logging.getLogger(__name__) class A_ ( lowerCAmelCase_ ): def __init__( self : List[Any] , snake_case_ : Tuple , snake_case_ : int , snake_case_ : int , snake_case_ : Optional[int]=None ): super().__init__( __lowerCamelCase , question_encoder_tokenizer=__lowerCamelCase , generator_tokenizer=__lowerCamelCase , index=__lowerCamelCase , init_retrieval=__lowerCamelCase , ) _UpperCAmelCase = None def lowercase ( self : Tuple , snake_case_ : Optional[Any] ): logger.info("initializing retrieval" ) # initializing a separate process group for retrieval as the default # nccl backend doesn't support gather/scatter operations while gloo # is too slow to replace nccl for the core gpu communication if dist.is_initialized(): logger.info("dist initialized" ) # needs to be set manually _UpperCAmelCase = self._infer_socket_ifname() # avoid clash with the NCCL port _UpperCAmelCase = str(distributed_port + 1 ) _UpperCAmelCase = dist.new_group(ranks=__lowerCamelCase , backend="gloo" ) # initialize retriever only on the main worker if not dist.is_initialized() or self._is_main(): logger.info("dist not initialized / main" ) self.index.init_index() # all processes wait untill the retriever is initialized by the main process if dist.is_initialized(): torch.distributed.barrier(group=self.process_group ) def lowercase ( self : Optional[int] ): return dist.get_rank(group=self.process_group ) == 0 def lowercase ( self : Dict , snake_case_ : Any , snake_case_ : Any , snake_case_ : Tuple=torch.floataa ): _UpperCAmelCase = torch.empty(__lowerCamelCase , dtype=__lowerCamelCase ) dist.scatter(__lowerCamelCase , src=0 , scatter_list=__lowerCamelCase , group=self.process_group ) return target_tensor def lowercase ( self : int ): _UpperCAmelCase = psutil.net_if_addrs() # a hacky way to deal with varying network interface names _UpperCAmelCase = next((addr for addr in addrs if addr.startswith("e" )) , __lowerCamelCase ) return ifname def lowercase ( self : Optional[Any] , snake_case_ : Union[str, Any] , snake_case_ : Optional[Any] ): # single GPU training if not dist.is_initialized(): _UpperCAmelCase = self._main_retrieve(__lowerCamelCase , __lowerCamelCase ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__lowerCamelCase ) # distributed training _UpperCAmelCase = dist.get_world_size(group=self.process_group ) # gather logic _UpperCAmelCase = None if self._is_main(): _UpperCAmelCase = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(__lowerCamelCase )] dist.gather(torch.tensor(__lowerCamelCase ) , dst=0 , gather_list=__lowerCamelCase , group=self.process_group ) # scatter logic _UpperCAmelCase = question_hidden_states.shape[0] _UpperCAmelCase = [] _UpperCAmelCase = [] if self._is_main(): assert len(__lowerCamelCase ) == world_size _UpperCAmelCase = self._main_retrieve(torch.cat(__lowerCamelCase ).numpy() , __lowerCamelCase ) _UpperCAmelCase = torch.tensor(__lowerCamelCase ), torch.tensor(__lowerCamelCase ) _UpperCAmelCase = self._chunk_tensor(__lowerCamelCase , __lowerCamelCase ) _UpperCAmelCase = self._chunk_tensor(__lowerCamelCase , __lowerCamelCase ) _UpperCAmelCase = self._scattered(__lowerCamelCase , [n_queries, n_docs] , target_type=torch.intaa ) _UpperCAmelCase = self._scattered(__lowerCamelCase , [n_queries, n_docs, 
question_hidden_states.shape[1]] ) return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(__lowerCamelCase )
code_codestyle: 22
from __future__ import annotations


def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """Greedy fractional knapsack: return the maximum value and the fraction taken of each item."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    # Take items in order of decreasing value-to-weight ratio.
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
style_context_codestyle: 11
label: 0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __A ={ 'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig'] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A =['ConvNextFeatureExtractor'] __A =['ConvNextImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A =[ 'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'ConvNextForImageClassification', 'ConvNextModel', 'ConvNextPreTrainedModel', 'ConvNextBackbone', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A =[ 'TFConvNextForImageClassification', 'TFConvNextModel', 'TFConvNextPreTrainedModel', ] if TYPE_CHECKING: from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_convnext import ConvNextFeatureExtractor from .image_processing_convnext import ConvNextImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convnext import ( CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvNextBackbone, ConvNextForImageClassification, ConvNextModel, ConvNextPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel else: import sys __A =_LazyModule(__name__, globals()['__file__'], _import_structure)
code_codestyle: 163
import warnings

from ...utils import logging
from .image_processing_beit import BeitImageProcessor


logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
style_context_codestyle: 11
label: 0
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: __A : List[str] = None __A : List[str] = logging.get_logger(__name__) __A : List[str] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} __A : str = { '''vocab_file''': { '''facebook/mbart-large-en-ro''': ( '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model''' ), '''facebook/mbart-large-cc25''': ( '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model''' ), }, '''tokenizer_file''': { '''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''', '''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''', }, } __A : Optional[Any] = { '''facebook/mbart-large-en-ro''': 1_024, '''facebook/mbart-large-cc25''': 1_024, } # fmt: off __A : Tuple = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN'''] class _UpperCAmelCase ( _A ): SCREAMING_SNAKE_CASE_ : int = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ : str = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ : Dict = ["input_ids", "attention_mask"] SCREAMING_SNAKE_CASE_ : List[str] = MBartTokenizer SCREAMING_SNAKE_CASE_ : str = [] SCREAMING_SNAKE_CASE_ : Optional[int] = [] def __init__( self : Optional[int] , A : Any=None , A : Union[str, Any]=None , A : Optional[int]="<s>" , A : Dict="</s>" , A : Union[str, Any]="</s>" , A : Tuple="<s>" , A : List[Any]="<unk>" , A : Optional[Any]="<pad>" , A : List[Any]="<mask>" , A : str=None , A : Tuple=None , A : Optional[Any]=None , **A : Union[str, Any] , ) -> Optional[int]: # Mask token behave like a normal word, i.e. include the space before it lowercase_ : List[str] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token super().__init__( vocab_file=__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , **__lowerCamelCase , ) lowercase_ : Union[str, Any] = vocab_file lowercase_ : int = False if not self.vocab_file else True lowercase_ : Optional[int] = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} ) lowercase_ : Union[str, Any] = { lang_code: self.convert_tokens_to_ids(__lowerCamelCase ) for lang_code in FAIRSEQ_LANGUAGE_CODES } lowercase_ : Optional[int] = src_lang if src_lang is not None else "en_XX" lowercase_ : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang ) lowercase_ : int = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def A ( self : int ) -> str: return self._src_lang @src_lang.setter def A ( self : Tuple , A : Union[str, Any] ) -> None: lowercase_ : Dict = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def A ( self : List[Any] , A : str , A : str = None ) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def A ( self : Dict , A : List[Any] , A : Union[str, Any] = None ) -> List[int]: lowercase_ : List[str] = [self.sep_token_id] lowercase_ : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def A ( self : Tuple , A : Dict , A : Tuple , A : Optional[Any] , A : Any , **A : str ) -> Dict: if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) lowercase_ : str = src_lang lowercase_ : Any = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase ) lowercase_ : Tuple = self.convert_tokens_to_ids(__lowerCamelCase ) lowercase_ : Dict = tgt_lang_id return inputs def A ( self : Optional[int] , A : Union[str, Any] , A : Dict = "en_XX" , A : Union[str, Any] = None , A : Dict = "ro_RO" , **A : Dict , ) -> BatchEncoding: lowercase_ : Any = src_lang lowercase_ : int = tgt_lang return super().prepare_seqaseq_batch(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ) def A ( self : List[Any] ) -> List[str]: return self.set_src_lang_special_tokens(self.src_lang ) def A ( self : Any ) -> List[Any]: return self.set_tgt_lang_special_tokens(self.tgt_lang ) def A ( self : List[Any] , A : List[Any] ) -> None: lowercase_ : int = self.convert_tokens_to_ids(__lowerCamelCase ) lowercase_ : int = [] lowercase_ : List[str] = [self.eos_token_id, self.cur_lang_code] lowercase_ : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens ) lowercase_ : str = self.convert_ids_to_tokens(self.suffix_tokens ) lowercase_ : List[Any] = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def A ( self : Union[str, Any] , A : List[str] ) -> None: lowercase_ : Optional[int] = self.convert_tokens_to_ids(__lowerCamelCase ) lowercase_ : List[Any] = [] lowercase_ : str = [self.eos_token_id, self.cur_lang_code] lowercase_ : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens ) lowercase_ : int = self.convert_ids_to_tokens(self.suffix_tokens ) lowercase_ : str = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', 
'''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def A ( self : Optional[Any] , A : Optional[int] , A : Tuple = None ) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(__lowerCamelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory.''' ) return lowercase_ : int = os.path.join( __lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ): copyfile(self.vocab_file , __lowerCamelCase ) return (out_vocab_file,)
code_codestyle: 33
import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class lowerCAmelCase__ : '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=3_2 , __lowerCamelCase=2 , __lowerCamelCase=3 , __lowerCamelCase=1_6 , __lowerCamelCase=[1, 2, 1] , __lowerCamelCase=[2, 2, 4] , __lowerCamelCase=2 , __lowerCamelCase=2.0 , __lowerCamelCase=True , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.1 , __lowerCamelCase="gelu" , __lowerCamelCase=False , __lowerCamelCase=True , __lowerCamelCase=0.0_2 , __lowerCamelCase=1e-5 , __lowerCamelCase=True , __lowerCamelCase=None , __lowerCamelCase=True , __lowerCamelCase=1_0 , __lowerCamelCase=8 , __lowerCamelCase=["stage1", "stage2", "stage3"] , __lowerCamelCase=[1, 2, 3] , ) -> Optional[Any]: _A : int = parent _A : Optional[Any] = batch_size _A : str = image_size _A : Tuple = patch_size _A : Tuple = num_channels _A : Optional[int] = embed_dim _A : Dict = depths _A : Any = num_heads _A : Any = window_size _A : int = mlp_ratio _A : Any = qkv_bias _A : Union[str, Any] = hidden_dropout_prob _A : Optional[Any] = attention_probs_dropout_prob _A : Dict = drop_path_rate _A : List[Any] = hidden_act _A : Any = use_absolute_embeddings _A : Optional[int] = patch_norm _A : Tuple = layer_norm_eps _A : List[str] = initializer_range _A : Optional[int] = is_training _A : Optional[Any] = scope _A : Optional[int] = use_labels _A : Dict = type_sequence_label_size _A : str = encoder_stride _A : Optional[int] = out_features _A : Optional[int] = out_indices def _lowerCamelCase ( self) -> Union[str, Any]: _A : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) _A : Optional[Any] = None if self.use_labels: _A : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size) _A : Optional[int] = self.get_config() return config, pixel_values, labels def _lowerCamelCase ( self) -> Union[str, Any]: return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> List[Any]: _A : Dict = MaskFormerSwinModel(config=__lowerCamelCase) model.to(__lowerCamelCase) model.eval() _A : int = model(__lowerCamelCase) _A : Any = 
((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1)) _A : List[str] = int(config.embed_dim * 2 ** (len(config.depths) - 1)) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim)) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Dict: _A : Optional[Any] = MaskFormerSwinBackbone(config=__lowerCamelCase) model.to(__lowerCamelCase) model.eval() _A : Dict = model(__lowerCamelCase) # verify feature maps self.parent.assertEqual(len(result.feature_maps) , len(config.out_features)) self.parent.assertListEqual(list(result.feature_maps[0].shape) , [1_3, 1_6, 1_6, 1_6]) # verify channels self.parent.assertEqual(len(model.channels) , len(config.out_features)) self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4]) # verify ValueError with self.parent.assertRaises(__lowerCamelCase): _A : Union[str, Any] = ["stem"] _A : Union[str, Any] = MaskFormerSwinBackbone(config=__lowerCamelCase) def _lowerCamelCase ( self) -> Dict: _A : Any = self.prepare_config_and_inputs() _A , _A , _A : List[Any] = config_and_inputs _A : Optional[int] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase__ ( a , a , unittest.TestCase): '''simple docstring''' __SCREAMING_SNAKE_CASE = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) __SCREAMING_SNAKE_CASE = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {} __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False def _lowerCamelCase ( self) -> str: _A : Union[str, Any] = MaskFormerSwinModelTester(self) _A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , embed_dim=3_7) @require_torch_multi_gpu @unittest.skip( reason=( "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with" " `nn.DataParallel`" )) def _lowerCamelCase ( self) -> Union[str, Any]: pass def _lowerCamelCase ( self) -> int: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _lowerCamelCase ( self) -> str: return def _lowerCamelCase ( self) -> List[Any]: _A : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase) def _lowerCamelCase ( self) -> Union[str, Any]: _A : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__lowerCamelCase) @unittest.skip("Swin does not use inputs_embeds") def _lowerCamelCase ( self) -> str: pass @unittest.skip("Swin does not support feedforward chunking") def _lowerCamelCase ( self) -> List[Any]: pass def _lowerCamelCase ( self) -> Optional[int]: _A , _A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A : Union[str, Any] = model_class(__lowerCamelCase) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) _A : Dict = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear)) def _lowerCamelCase ( self) -> Any: _A , _A : List[Any] 
= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A : int = model_class(__lowerCamelCase) _A : Optional[int] = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic _A : int = [*signature.parameters.keys()] _A : Optional[int] = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase) @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions") def _lowerCamelCase ( self) -> Tuple: pass @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone") def _lowerCamelCase ( self) -> str: pass def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Optional[int]: _A : Any = model_class(__lowerCamelCase) model.to(__lowerCamelCase) model.eval() with torch.no_grad(): _A : str = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)) _A : Tuple = outputs.hidden_states _A : Any = getattr( self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths) + 1) self.assertEqual(len(__lowerCamelCase) , __lowerCamelCase) # Swin has a different seq_length _A : Optional[int] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable) else (config.patch_size, config.patch_size) ) _A : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , ) def _lowerCamelCase ( self) -> Dict: _A , _A : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _A : Any = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: _A : List[Any] = True self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _A : Optional[int] = True self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) def _lowerCamelCase ( self) -> Tuple: _A , _A : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _A : Optional[int] = 3 _A : Dict = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) _A : Optional[int] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable) else (config.patch_size, config.patch_size) ) _A : int = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) _A : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: _A : List[Any] = True self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , (padded_height, padded_width)) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _A : Union[str, Any] = True self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , (padded_height, padded_width)) @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints") def _lowerCamelCase ( self) -> List[str]: pass @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin") def _lowerCamelCase ( self) -> List[str]: 
pass @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin") def _lowerCamelCase ( self) -> str: pass def _lowerCamelCase ( self) -> Optional[Any]: _A , _A : Any = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(__lowerCamelCase): _A : Optional[int] = 0 return t def check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase={}): with torch.no_grad(): _A : Any = model(**__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase) _A : int = model(**__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase).to_tuple() def recursive_check(__lowerCamelCase , __lowerCamelCase): if isinstance(__lowerCamelCase , (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(__lowerCamelCase , __lowerCamelCase): recursive_check(__lowerCamelCase , __lowerCamelCase) elif isinstance(__lowerCamelCase , __lowerCamelCase): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values()): recursive_check(__lowerCamelCase , __lowerCamelCase) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(__lowerCamelCase) , set_nan_tensor_to_zero(__lowerCamelCase) , atol=1e-5) , msg=( "Tuple and dict output are not equal. Difference:" F" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:" F" {torch.isnan(__lowerCamelCase).any()} and `inf`: {torch.isinf(__lowerCamelCase)}. Dict has" F" `nan`: {torch.isnan(__lowerCamelCase).any()} and `inf`: {torch.isinf(__lowerCamelCase)}." ) , ) recursive_check(__lowerCamelCase , __lowerCamelCase) for model_class in self.all_model_classes: _A : List[Any] = model_class(__lowerCamelCase) model.to(__lowerCamelCase) model.eval() _A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase) _A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase) _A : Any = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase) _A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase) _A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase) _A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True}) _A : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase) _A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True}) @require_torch class lowerCAmelCase__ ( unittest.TestCase , a): '''simple docstring''' __SCREAMING_SNAKE_CASE = (MaskFormerSwinBackbone,) if is_torch_available() else () __SCREAMING_SNAKE_CASE = MaskFormerSwinConfig def _lowerCamelCase ( self) -> Optional[Any]: _A : Tuple = MaskFormerSwinModelTester(self) def _lowerCamelCase ( self) -> Optional[Any]: _A , _A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() _A : Union[str, Any] = inputs_dict["pixel_values"].shape[0] for backbone_class in self.all_model_classes: _A : Optional[Any] = backbone_class(__lowerCamelCase) backbone.to(__lowerCamelCase) backbone.eval() _A : List[Any] = 
backbone(**__lowerCamelCase) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , __lowerCamelCase) self.assertTrue(len(outputs.feature_maps) == len(backbone.channels)) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels)) self.assertIsNone(outputs.hidden_states) self.assertIsNone(outputs.attentions) # Test output_hidden_states=True _A : List[str] = backbone(**__lowerCamelCase , output_hidden_states=__lowerCamelCase) self.assertIsNotNone(outputs.hidden_states) self.assertTrue(len(outputs.hidden_states) , len(backbone.stage_names)) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) _A , _A , _A : List[str] = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels)) # Test output_attentions=True if self.has_attentions: _A : int = backbone(**__lowerCamelCase , output_attentions=__lowerCamelCase) self.assertIsNotNone(outputs.attentions)
style_context_codestyle: 11
label: 0
"""simple docstring""" import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class _UpperCamelCase : '''simple docstring''' def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=False , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_12 , __a=16 , __a=2 , __a=0.0_2 , __a=3 , __a=4 , __a=None , ): __lowerCAmelCase = parent __lowerCAmelCase = batch_size __lowerCAmelCase = seq_length __lowerCAmelCase = is_training __lowerCAmelCase = use_input_mask __lowerCAmelCase = use_token_type_ids __lowerCAmelCase = use_labels __lowerCAmelCase = vocab_size __lowerCAmelCase = hidden_size __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = intermediate_size __lowerCAmelCase = hidden_act __lowerCAmelCase = hidden_dropout_prob __lowerCAmelCase = attention_probs_dropout_prob __lowerCAmelCase = max_position_embeddings __lowerCAmelCase = type_vocab_size __lowerCAmelCase = type_sequence_label_size __lowerCAmelCase = initializer_range __lowerCAmelCase = num_labels __lowerCAmelCase = num_choices __lowerCAmelCase = scope def snake_case ( self ): __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCAmelCase = None if self.use_input_mask: __lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCAmelCase = None if self.use_token_type_ids: __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCAmelCase = None __lowerCAmelCase = None __lowerCAmelCase = None if self.use_labels: __lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) __lowerCAmelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def snake_case ( self ): return BioGptConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , ) def snake_case ( self , __a , __a , __a , __a , __a , __a , __a ): __lowerCAmelCase = BioGptModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() __lowerCAmelCase = model(__lowerCamelCase , attention_mask=__lowerCamelCase ) __lowerCAmelCase = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case ( 
self , __a , __a , __a , __a , __a , __a , __a , __a , __a , ): __lowerCAmelCase = BioGptForCausalLM(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() __lowerCAmelCase = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case ( self , __a , __a , __a , __a , __a , *__a ): __lowerCAmelCase = BioGptModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() # create attention mask __lowerCAmelCase = torch.ones(input_ids.shape , dtype=torch.long , device=__lowerCamelCase ) __lowerCAmelCase = self.seq_length // 2 __lowerCAmelCase = 0 # first forward pass __lowerCAmelCase = model(__lowerCamelCase , attention_mask=__lowerCamelCase ).to_tuple() # create hypothetical next token and extent to next_input_ids __lowerCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size ) # change a random masked slice from input_ids __lowerCAmelCase = ids_tensor((1,) , __lowerCamelCase ).item() + 1 __lowerCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 ) __lowerCAmelCase = random_other_next_tokens # append to next input_ids and attn_mask __lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) __lowerCAmelCase = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=__lowerCamelCase )] , dim=1 , ) # get two different outputs __lowerCAmelCase = model(__lowerCamelCase , attention_mask=__lowerCamelCase )["last_hidden_state"] __lowerCAmelCase = model(__lowerCamelCase , past_key_values=__lowerCamelCase , attention_mask=__lowerCamelCase )["last_hidden_state"] # select random slice __lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() __lowerCAmelCase = output_from_no_past[:, -1, random_slice_idx].detach() __lowerCAmelCase = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3 ) ) def snake_case ( self , __a , __a , __a , __a , __a , *__a ): __lowerCAmelCase = BioGptModel(config=__lowerCamelCase ).to(__lowerCamelCase ).eval() __lowerCAmelCase = torch.ones(input_ids.shape , dtype=torch.long , device=__lowerCamelCase ) # first forward pass __lowerCAmelCase = model(__lowerCamelCase , attention_mask=__lowerCamelCase , use_cache=__lowerCamelCase ) __lowerCAmelCase = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids __lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) __lowerCAmelCase = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and __lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) __lowerCAmelCase = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) __lowerCAmelCase = model(__lowerCamelCase , attention_mask=__lowerCamelCase )["last_hidden_state"] __lowerCAmelCase = model(__lowerCamelCase , attention_mask=__lowerCamelCase , past_key_values=__lowerCamelCase )[ "last_hidden_state" ] # select random slice __lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() __lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach() __lowerCAmelCase = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , 
atol=1e-3 ) ) def snake_case ( self , __a , __a , __a , __a , __a , *__a , __a=False ): __lowerCAmelCase = BioGptForCausalLM(__lowerCamelCase ) model.to(__lowerCamelCase ) if gradient_checkpointing: model.gradient_checkpointing_enable() __lowerCAmelCase = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) result.loss.backward() def snake_case ( self , __a , *__a ): __lowerCAmelCase = BioGptModel(__lowerCamelCase ) __lowerCAmelCase = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers ) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_0_1 ) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.0_1 ) def snake_case ( self , __a , __a , __a , __a , __a , *__a ): __lowerCAmelCase = self.num_labels __lowerCAmelCase = BioGptForTokenClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() __lowerCAmelCase = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def snake_case ( self ): __lowerCAmelCase = self.prepare_config_and_inputs() ( __lowerCAmelCase ) = config_and_inputs __lowerCAmelCase = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class _UpperCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,unittest.TestCase ): '''simple docstring''' __UpperCAmelCase : int =( (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification) if is_torch_available() else () ) __UpperCAmelCase : Dict =(BioGptForCausalLM,) if is_torch_available() else () __UpperCAmelCase : Optional[Any] =( { """feature-extraction""": BioGptModel, """text-classification""": BioGptForSequenceClassification, """text-generation""": BioGptForCausalLM, """token-classification""": BioGptForTokenClassification, """zero-shot""": BioGptForSequenceClassification, } if is_torch_available() else {} ) __UpperCAmelCase : List[str] =False def snake_case ( self ): __lowerCAmelCase = BioGptModelTester(self ) __lowerCAmelCase = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 ) def snake_case ( self ): self.config_tester.run_common_tests() def snake_case ( self ): __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def snake_case ( self ): __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __lowerCAmelCase = type self.model_tester.create_and_check_model(*__lowerCamelCase ) def snake_case ( self ): __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_attention_mask_past(*__lowerCamelCase ) def snake_case ( self ): __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*__lowerCamelCase , gradient_checkpointing=__lowerCamelCase ) def snake_case ( self ): __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_past_large_inputs(*__lowerCamelCase ) def snake_case ( self ): __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_biogpt_weight_initialization(*__lowerCamelCase ) def snake_case ( self ): __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*__lowerCamelCase ) @slow def snake_case ( self ): __lowerCAmelCase = BioGptForCausalLM.from_pretrained("microsoft/biogpt" ) model.to(__lowerCamelCase ) __lowerCAmelCase = BioGptTokenizer.from_pretrained("microsoft/biogpt" ) __lowerCAmelCase = "left" # Define PAD Token = EOS Token = 50256 __lowerCAmelCase = tokenizer.eos_token __lowerCAmelCase = model.config.eos_token_id # use different length sentences to test batching __lowerCAmelCase = [ "Hello, my dog is a little", "Today, I", ] __lowerCAmelCase = tokenizer(__lowerCamelCase , return_tensors="pt" , padding=__lowerCamelCase ) __lowerCAmelCase = inputs["input_ids"].to(__lowerCamelCase ) __lowerCAmelCase = model.generate( input_ids=__lowerCamelCase , attention_mask=inputs["attention_mask"].to(__lowerCamelCase ) , ) __lowerCAmelCase = tokenizer(sentences[0] , return_tensors="pt" ).input_ids.to(__lowerCamelCase ) __lowerCAmelCase = model.generate(input_ids=__lowerCamelCase ) __lowerCAmelCase = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item() __lowerCAmelCase = tokenizer(sentences[1] , return_tensors="pt" ).input_ids.to(__lowerCamelCase ) __lowerCAmelCase = model.generate(input_ids=__lowerCamelCase , max_length=model.config.max_length - num_paddings ) __lowerCAmelCase = tokenizer.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase ) __lowerCAmelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__lowerCamelCase ) __lowerCAmelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=__lowerCamelCase ) __lowerCAmelCase = [ "Hello, my dog is a little bit bigger than a little bit.", "Today, I have a good idea of how to use the information", ] self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) self.assertListEqual(__lowerCamelCase , [non_padded_sentence, padded_sentence] ) @slow def snake_case ( self ): for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase = BioGptModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def snake_case ( self ): __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() __lowerCAmelCase = 3 __lowerCAmelCase = input_dict["input_ids"] __lowerCAmelCase = input_ids.ne(1 ).to(__lowerCamelCase ) __lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __lowerCAmelCase = BioGptForSequenceClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() __lowerCAmelCase = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def snake_case ( self ): __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() __lowerCAmelCase = 3 __lowerCAmelCase = "multi_label_classification" __lowerCAmelCase = input_dict["input_ids"] __lowerCAmelCase = input_ids.ne(1 ).to(__lowerCamelCase ) __lowerCAmelCase = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) __lowerCAmelCase = BioGptForSequenceClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() __lowerCAmelCase = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase ) 
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @require_torch class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' @slow def snake_case ( self ): __lowerCAmelCase = BioGptForCausalLM.from_pretrained("microsoft/biogpt" ) __lowerCAmelCase = torch.tensor([[2, 48_05, 9, 6_56, 21]] ) __lowerCAmelCase = model(__lowerCamelCase )[0] __lowerCAmelCase = 4_23_84 __lowerCAmelCase = torch.Size((1, 5, vocab_size) ) self.assertEqual(output.shape , __lowerCamelCase ) __lowerCAmelCase = torch.tensor( [[[-9.5_2_3_6, -9.8_9_1_8, 10.45_57], [-11.04_69, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCamelCase , atol=1e-4 ) ) @slow def snake_case ( self ): __lowerCAmelCase = BioGptTokenizer.from_pretrained("microsoft/biogpt" ) __lowerCAmelCase = BioGptForCausalLM.from_pretrained("microsoft/biogpt" ) model.to(__lowerCamelCase ) torch.manual_seed(0 ) __lowerCAmelCase = tokenizer("COVID-19 is" , return_tensors="pt" ).to(__lowerCamelCase ) __lowerCAmelCase = model.generate( **__lowerCamelCase , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=__lowerCamelCase , ) __lowerCAmelCase = tokenizer.decode(output_ids[0] , skip_special_tokens=__lowerCamelCase ) __lowerCAmelCase = ( "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the" " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and" " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK)," " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and" " more than 800,000 deaths." ) self.assertEqual(__lowerCamelCase , __lowerCamelCase )
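As a rough usage sketch of the checkpoint exercised by the slow tests above (the generation settings are illustrative, not prescriptive):

import torch
from transformers import BioGptForCausalLM, BioGptTokenizer

tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
model = BioGptForCausalLM.from_pretrained("microsoft/biogpt").eval()

inputs = tokenizer("COVID-19 is", return_tensors="pt")
with torch.no_grad():
    # Beam search mirrors the integration test above, with a shorter max_length for a quick run.
    output_ids = model.generate(**inputs, min_length=20, max_length=60, num_beams=5, early_stopping=True)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))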
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_torch_available,
)


_import_structure = {
    "configuration_speecht5": [
        "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
        "SpeechT5Config",
        "SpeechT5HifiGanConfig",
    ],
    "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
    "processing_speecht5": ["SpeechT5Processor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speecht5"] = [
        "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SpeechT5ForSpeechToText",
        "SpeechT5ForSpeechToSpeech",
        "SpeechT5ForTextToSpeech",
        "SpeechT5Model",
        "SpeechT5PreTrainedModel",
        "SpeechT5HifiGan",
    ]

if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )

else:
    import sys

    # Defer the real imports until attribute access, following the lazy-module pattern used across the library.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
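A short, hedged sketch of how this lazy import structure is consumed downstream; the torch-backed modules are only imported when the names are first accessed, and the checkpoint names below ("microsoft/speecht5_tts", "microsoft/speecht5_hifigan") are assumptions used purely for illustration:

from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor

# Importing from the package root resolves through the _LazyModule above;
# the heavy modeling code is loaded here, on first use, not at package import time.
processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")      # assumed checkpoint name
model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")    # assumed checkpoint name
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")      # assumed checkpoint name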
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING import torch from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor from ..utils import requires_backends from .base import PipelineTool if TYPE_CHECKING: from PIL import Image class __a (UpperCamelCase_): '''simple docstring''' _SCREAMING_SNAKE_CASE :Optional[Any] = """dandelin/vilt-b32-finetuned-vqa""" _SCREAMING_SNAKE_CASE :Optional[Any] = ( """This is a tool that answers a question about an image. It takes an input named `image` which should be the """ """image containing the information, as well as a `question` which should be the question in English. It """ """returns a text that is the answer to the question.""" ) _SCREAMING_SNAKE_CASE :Union[str, Any] = """image_qa""" _SCREAMING_SNAKE_CASE :Union[str, Any] = AutoProcessor _SCREAMING_SNAKE_CASE :Any = AutoModelForVisualQuestionAnswering _SCREAMING_SNAKE_CASE :List[Any] = ["""image""", """text"""] _SCREAMING_SNAKE_CASE :Union[str, Any] = ["""text"""] def __init__( self , *_a , **_a ) -> int: """simple docstring""" requires_backends(self , ["""vision"""] ) super().__init__(*__lowerCamelCase , **__lowerCamelCase ) def _a ( self , _a , _a ) -> Tuple: """simple docstring""" return self.pre_processor(__lowerCamelCase , __lowerCamelCase , return_tensors="""pt""" ) def _a ( self , _a ) -> List[Any]: """simple docstring""" with torch.no_grad(): return self.model(**__lowerCamelCase ).logits def _a ( self , _a ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = outputs.argmax(-1 ).item() return self.model.config.idalabel[idx]
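A minimal sketch of calling the tool above directly; it relies on PipelineTool.__call__ wiring encode/forward/decode together, and "photo.png" plus the question are placeholders:

from PIL import Image

tool = ImageQuestionAnsweringTool()           # lazily loads the default ViLT checkpoint on first call
image = Image.open("photo.png")               # placeholder image path
answer = tool(image, "How many cats are in the picture?")
print(answer)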
import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def _UpperCAmelCase (UpperCamelCase__ : Union[str, Any] ): _A , _A : Any = image.size _A , _A : str = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 _A : List[str] = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) _A : Any = np.array(UpperCamelCase__ ).astype(np.floataa ) / 2_55.0 _A : Optional[Any] = image[None].transpose(0 , 3 , 1 , 2 ) _A : Union[str, Any] = torch.from_numpy(UpperCamelCase__ ) return 2.0 * image - 1.0 class lowerCAmelCase__ ( a): '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> Optional[int]: super().__init__() self.register_modules(vqvae=__lowerCamelCase , unet=__lowerCamelCase , scheduler=__lowerCamelCase) @torch.no_grad() def __call__( self , __lowerCamelCase = None , __lowerCamelCase = 1 , __lowerCamelCase = 1_0_0 , __lowerCamelCase = 0.0 , __lowerCamelCase = None , __lowerCamelCase = "pil" , __lowerCamelCase = True , ) -> Union[Tuple, ImagePipelineOutput]: if isinstance(__lowerCamelCase , PIL.Image.Image): _A : Tuple = 1 elif isinstance(__lowerCamelCase , torch.Tensor): _A : Union[str, Any] = image.shape[0] else: raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(__lowerCamelCase)}") if isinstance(__lowerCamelCase , PIL.Image.Image): _A : Union[str, Any] = preprocess(__lowerCamelCase) _A , _A : Union[str, Any] = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image _A : Optional[Any] = (batch_size, self.unet.config.in_channels // 2, height, width) _A : str = next(self.unet.parameters()).dtype _A : Union[str, Any] = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=self.device , dtype=__lowerCamelCase) _A : List[Any] = image.to(device=self.device , dtype=__lowerCamelCase) # set timesteps and move to the correct device self.scheduler.set_timesteps(__lowerCamelCase , device=self.device) _A : Any = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler _A : List[str] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] _A : str = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) _A : Optional[int] = {} if accepts_eta: _A : List[Any] = eta for t in self.progress_bar(__lowerCamelCase): # concat latents and low resolution image in the channel dimension. 
_A : List[Any] = torch.cat([latents, image] , dim=1) _A : str = self.scheduler.scale_model_input(__lowerCamelCase , __lowerCamelCase) # predict the noise residual _A : Any = self.unet(__lowerCamelCase , __lowerCamelCase).sample # compute the previous noisy sample x_t -> x_t-1 _A : Optional[int] = self.scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase).prev_sample # decode the image latents with the VQVAE _A : Union[str, Any] = self.vqvae.decode(__lowerCamelCase).sample _A : Dict = torch.clamp(__lowerCamelCase , -1.0 , 1.0) _A : Tuple = image / 2 + 0.5 _A : int = image.cpu().permute(0 , 2 , 3 , 1).numpy() if output_type == "pil": _A : Optional[int] = self.numpy_to_pil(__lowerCamelCase) if not return_dict: return (image,) return ImagePipelineOutput(images=__lowerCamelCase)
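A hedged usage sketch of this super-resolution pipeline. In the released diffusers package the equivalent class is LDMSuperResolutionPipeline, and the checkpoint name below is the commonly published one; both are assumptions here rather than facts stated by the snippet above:

from PIL import Image
from diffusers import LDMSuperResolutionPipeline

pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")  # assumed checkpoint
low_res = Image.open("low_res.png").convert("RGB")   # placeholder low-resolution input image
upscaled = pipe(image=low_res, num_inference_steps=100, eta=1.0).images[0]
upscaled.save("upscaled.png")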
import pickle import numpy as np from matplotlib import pyplot as plt class __lowerCAmelCase : """simple docstring""" def __init__( self : List[str] , _snake_case : int , _snake_case : Union[str, Any] , _snake_case : str , _snake_case : Dict , _snake_case : Union[str, Any] , _snake_case : Dict=0.2 , _snake_case : Tuple=0.2 ): __lowercase : Optional[int] = bp_numa __lowercase : Dict = bp_numa __lowercase : Tuple = bp_numa __lowercase : List[str] = conva_get[:2] __lowercase : Tuple = conva_get[2] __lowercase : Optional[int] = size_pa __lowercase : Optional[Any] = rate_w __lowercase : Optional[Any] = rate_t __lowercase : Union[str, Any] = [ np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] __lowercase : int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) __lowercase : Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) __lowercase : Any = -2 * np.random.rand(self.conva[1] ) + 1 __lowercase : Optional[int] = -2 * np.random.rand(self.num_bpa ) + 1 __lowercase : Optional[Any] = -2 * np.random.rand(self.num_bpa ) + 1 def snake_case_ ( self : Optional[Any] , _snake_case : int ): # save model dict with pickle __lowercase : Dict = { "num_bp1": self.num_bpa, "num_bp2": self.num_bpa, "num_bp3": self.num_bpa, "conv1": self.conva, "step_conv1": self.step_conva, "size_pooling1": self.size_poolinga, "rate_weight": self.rate_weight, "rate_thre": self.rate_thre, "w_conv1": self.w_conva, "wkj": self.wkj, "vji": self.vji, "thre_conv1": self.thre_conva, "thre_bp2": self.thre_bpa, "thre_bp3": self.thre_bpa, } with open(__lowerCamelCase , '''wb''' ) as f: pickle.dump(__lowerCamelCase , __lowerCamelCase ) print(F'Model saved: {save_path}' ) @classmethod def snake_case_ ( cls : Tuple , _snake_case : Dict ): # read saved model with open(__lowerCamelCase , '''rb''' ) as f: __lowercase : Any = pickle.load(__lowerCamelCase ) # noqa: S301 __lowercase : Optional[int] = model_dic.get('''conv1''' ) conv_get.append(model_dic.get('''step_conv1''' ) ) __lowercase : str = model_dic.get('''size_pooling1''' ) __lowercase : List[str] = model_dic.get('''num_bp1''' ) __lowercase : Union[str, Any] = model_dic.get('''num_bp2''' ) __lowercase : List[Any] = model_dic.get('''num_bp3''' ) __lowercase : Dict = model_dic.get('''rate_weight''' ) __lowercase : List[Any] = model_dic.get('''rate_thre''' ) # create model instance __lowercase : str = CNN(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # modify model parameter __lowercase : List[Any] = model_dic.get('''w_conv1''' ) __lowercase : Union[str, Any] = model_dic.get('''wkj''' ) __lowercase : str = model_dic.get('''vji''' ) __lowercase : List[str] = model_dic.get('''thre_conv1''' ) __lowercase : Optional[Any] = model_dic.get('''thre_bp2''' ) __lowercase : Dict = model_dic.get('''thre_bp3''' ) return conv_ins def snake_case_ ( self : Tuple , _snake_case : Any ): return 1 / (1 + np.exp(-1 * x )) def snake_case_ ( self : Dict , _snake_case : Union[str, Any] ): return round(__lowerCamelCase , 3 ) def snake_case_ ( self : Optional[Any] , _snake_case : List[str] , _snake_case : int , _snake_case : List[str] , _snake_case : Optional[int] , _snake_case : Optional[int] ): # convolution process __lowercase : Tuple = convs[0] __lowercase : Union[str, Any] = convs[1] __lowercase : List[Any] = np.shape(__lowerCamelCase )[0] # get the data slice of original image data, data_focus __lowercase : Tuple = [] for i_focus in range(0 , 
size_data - size_conv + 1 , __lowerCamelCase ): for j_focus in range(0 , size_data - size_conv + 1 , __lowerCamelCase ): __lowercase : Optional[int] = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(__lowerCamelCase ) # calculate the feature map of every single kernel, and saved as list of matrix __lowercase : Optional[Any] = [] __lowercase : Optional[int] = int((size_data - size_conv) / conv_step + 1 ) for i_map in range(__lowerCamelCase ): __lowercase : Optional[int] = [] for i_focus in range(len(__lowerCamelCase ) ): __lowercase : Any = ( np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) ) - thre_convs[i_map] ) featuremap.append(self.sig(__lowerCamelCase ) ) __lowercase : Optional[Any] = np.asmatrix(__lowerCamelCase ).reshape( __lowerCamelCase , __lowerCamelCase ) data_featuremap.append(__lowerCamelCase ) # expanding the data slice to One dimenssion __lowercase : Optional[Any] = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(__lowerCamelCase ) ) __lowercase : Dict = np.asarray(__lowerCamelCase ) return focus_list, data_featuremap def snake_case_ ( self : Optional[Any] , _snake_case : Optional[Any] , _snake_case : Optional[int] , _snake_case : Tuple="average_pool" ): # pooling process __lowercase : Optional[Any] = len(featuremaps[0] ) __lowercase : str = int(size_map / size_pooling ) __lowercase : Optional[int] = [] for i_map in range(len(__lowerCamelCase ) ): __lowercase : int = featuremaps[i_map] __lowercase : Optional[int] = [] for i_focus in range(0 , __lowerCamelCase , __lowerCamelCase ): for j_focus in range(0 , __lowerCamelCase , __lowerCamelCase ): __lowercase : str = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(__lowerCamelCase ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(__lowerCamelCase ) ) __lowercase : Tuple = np.asmatrix(__lowerCamelCase ).reshape(__lowerCamelCase , __lowerCamelCase ) featuremap_pooled.append(__lowerCamelCase ) return featuremap_pooled def snake_case_ ( self : int , _snake_case : str ): # expanding three dimension data to one dimension list __lowercase : Tuple = [] for i in range(len(__lowerCamelCase ) ): __lowercase : Union[str, Any] = np.shape(data[i] ) __lowercase : List[Any] = data[i].reshape(1 , shapes[0] * shapes[1] ) __lowercase : Optional[Any] = data_listed.getA().tolist()[0] data_expanded.extend(__lowerCamelCase ) __lowercase : Optional[Any] = np.asarray(__lowerCamelCase ) return data_expanded def snake_case_ ( self : str , _snake_case : Optional[Any] ): # expanding matrix to one dimension list __lowercase : List[Any] = np.asarray(__lowerCamelCase ) __lowercase : Union[str, Any] = np.shape(__lowerCamelCase ) __lowercase : Dict = data_mat.reshape(1 , shapes[0] * shapes[1] ) return data_expanded def snake_case_ ( self : Dict , _snake_case : List[Any] , _snake_case : Optional[int] , _snake_case : Optional[int] , _snake_case : Optional[Any] , _snake_case : str ): __lowercase : Dict = [] __lowercase : Any = 0 for i_map in range(__lowerCamelCase ): __lowercase : Union[str, Any] = np.ones((size_map, size_map) ) for i in range(0 , __lowerCamelCase , __lowerCamelCase ): for j in range(0 , __lowerCamelCase , __lowerCamelCase ): __lowercase : List[Any] = pd_pool[ i_pool ] __lowercase : Tuple = i_pool + 1 __lowercase : Optional[Any] = np.multiply( __lowerCamelCase , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) ) 
pd_all.append(__lowerCamelCase ) return pd_all def snake_case_ ( self : str , _snake_case : int , _snake_case : Union[str, Any] , _snake_case : Union[str, Any] , _snake_case : List[str] , _snake_case : Union[str, Any] , _snake_case : str=bool ): # model traning print('''----------------------Start Training-------------------------''' ) print((''' - - Shape: Train_Data ''', np.shape(__lowerCamelCase )) ) print((''' - - Shape: Teach_Data ''', np.shape(__lowerCamelCase )) ) __lowercase : Tuple = 0 __lowercase : Dict = [] __lowercase : Optional[Any] = 1_0000 while rp < n_repeat and mse >= error_accuracy: __lowercase : Union[str, Any] = 0 print(F'-------------Learning Time {rp}--------------' ) for p in range(len(__lowerCamelCase ) ): # print('------------Learning Image: %d--------------'%p) __lowercase : str = np.asmatrix(datas_train[p] ) __lowercase : Union[str, Any] = np.asarray(datas_teach[p] ) __lowercase : Any = self.convolute( __lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) __lowercase : Optional[Any] = self.pooling(__lowerCamelCase , self.size_poolinga ) __lowercase : Optional[int] = np.shape(__lowerCamelCase ) __lowercase : List[str] = self._expand(__lowerCamelCase ) __lowercase : Tuple = data_bp_input __lowercase : int = np.dot(__lowerCamelCase , self.vji.T ) - self.thre_bpa __lowercase : List[Any] = self.sig(__lowerCamelCase ) __lowercase : Union[str, Any] = np.dot(__lowerCamelCase , self.wkj.T ) - self.thre_bpa __lowercase : List[str] = self.sig(__lowerCamelCase ) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- __lowercase : int = np.multiply( (data_teach - bp_outa) , np.multiply(__lowerCamelCase , (1 - bp_outa) ) ) __lowercase : Optional[Any] = np.multiply( np.dot(__lowerCamelCase , self.wkj ) , np.multiply(__lowerCamelCase , (1 - bp_outa) ) ) __lowercase : Union[str, Any] = np.dot(__lowerCamelCase , self.vji ) __lowercase : Any = pd_i_all / (self.size_poolinga * self.size_poolinga) __lowercase : Dict = pd_conva_pooled.T.getA().tolist() __lowercase : Optional[Any] = self._calculate_gradient_from_pool( __lowerCamelCase , __lowerCamelCase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , ) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): __lowercase : int = self._expand_mat(pd_conva_all[k_conv] ) __lowercase : Optional[int] = self.rate_weight * np.dot(__lowerCamelCase , __lowerCamelCase ) __lowercase : List[Any] = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) __lowercase : Any = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # all connected layer __lowercase : Tuple = self.wkj + pd_k_all.T * bp_outa * self.rate_weight __lowercase : int = self.vji + pd_j_all.T * bp_outa * self.rate_weight __lowercase : Tuple = self.thre_bpa - pd_k_all * self.rate_thre __lowercase : List[str] = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image __lowercase : Optional[int] = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) __lowercase : Any = rp + 1 __lowercase : Dict = error_count / patterns all_mse.append(__lowerCamelCase ) def draw_error(): __lowercase : Optional[int] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(__lowerCamelCase , '''+-''' ) plt.plot(__lowerCamelCase , '''r--''' ) plt.xlabel('''Learning Times''' ) plt.ylabel('''All_mse''' 
) plt.grid(__lowerCamelCase , alpha=0.5 ) plt.show() print('''------------------Training Complished---------------------''' ) print((''' - - Training epoch: ''', rp, F' - - Mse: {mse:.6f}') ) if draw_e: draw_error() return mse def snake_case_ ( self : Tuple , _snake_case : Any ): # model predict __lowercase : Union[str, Any] = [] print('''-------------------Start Testing-------------------------''' ) print((''' - - Shape: Test_Data ''', np.shape(__lowerCamelCase )) ) for p in range(len(__lowerCamelCase ) ): __lowercase : int = np.asmatrix(datas_test[p] ) __lowercase : List[Any] = self.convolute( __lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) __lowercase : str = self.pooling(__lowerCamelCase , self.size_poolinga ) __lowercase : Optional[int] = self._expand(__lowerCamelCase ) __lowercase : List[Any] = data_bp_input __lowercase : Optional[int] = bp_outa * self.vji.T - self.thre_bpa __lowercase : int = self.sig(__lowerCamelCase ) __lowercase : int = bp_outa * self.wkj.T - self.thre_bpa __lowercase : Optional[int] = self.sig(__lowerCamelCase ) produce_out.extend(bp_outa.getA().tolist() ) __lowercase : int = [list(map(self.do_round , __lowerCamelCase ) ) for each in produce_out] return np.asarray(__lowerCamelCase ) def snake_case_ ( self : Dict , _snake_case : Tuple ): # return the data of image after convoluting process so we can check it out __lowercase : Optional[int] = np.asmatrix(__lowerCamelCase ) __lowercase : Tuple = self.convolute( __lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) __lowercase : Union[str, Any] = self.pooling(__lowerCamelCase , self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
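A toy construction sketch for the network above, heavily hedged: the argument order follows the reference implementation this snippet appears to derive from, the mangled local assignments in the constructor are assumed to restore the self.* attributes used elsewhere in the class, and the shapes are chosen only to be self-consistent (two 3x3 kernels with step 1 over 6x6 inputs give 4x4 feature maps; 2x2 average pooling gives 2x2 maps, so the flattened BP input is 2 * 2 * 2 = 8):

import numpy as np

# Positional arguments (assumed order): conv spec [kernel_size, n_kernels, step],
# pooling size, then the three back-propagation layer sizes.
cnn = CNN([3, 2, 1], 2, 8, 6, 2)
print(np.shape(cnn.w_conva))   # expected (2, 3, 3): one 3x3 weight matrix per kernel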
import unittest import torch from diffusers import VQModel from diffusers.utils import floats_tensor, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class lowerCAmelCase__ ( a , a , unittest.TestCase): '''simple docstring''' __SCREAMING_SNAKE_CASE = VQModel __SCREAMING_SNAKE_CASE = "sample" @property def _lowerCamelCase ( self , __lowerCamelCase=(3_2, 3_2)) -> Optional[Any]: _A : Optional[int] = 4 _A : Tuple = 3 _A : List[Any] = floats_tensor((batch_size, num_channels) + sizes).to(__lowerCamelCase) return {"sample": image} @property def _lowerCamelCase ( self) -> int: return (3, 3_2, 3_2) @property def _lowerCamelCase ( self) -> List[Any]: return (3, 3_2, 3_2) def _lowerCamelCase ( self) -> Union[str, Any]: _A : List[Any] = { "block_out_channels": [3_2, 6_4], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 3, } _A : int = self.dummy_input return init_dict, inputs_dict def _lowerCamelCase ( self) -> Union[str, Any]: pass def _lowerCamelCase ( self) -> Any: pass def _lowerCamelCase ( self) -> Any: _A , _A : List[Any] = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=__lowerCamelCase) self.assertIsNotNone(__lowerCamelCase) self.assertEqual(len(loading_info["missing_keys"]) , 0) model.to(__lowerCamelCase) _A : str = model(**self.dummy_input) assert image is not None, "Make sure output is not None" def _lowerCamelCase ( self) -> Union[str, Any]: _A : Optional[Any] = VQModel.from_pretrained("fusing/vqgan-dummy") model.to(__lowerCamelCase).eval() torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) _A : Tuple = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size) _A : Optional[int] = image.to(__lowerCamelCase) with torch.no_grad(): _A : List[str] = model(__lowerCamelCase).sample _A : int = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off _A : Optional[Any] = torch.tensor([-0.0_1_5_3, -0.4_0_4_4, -0.1_8_8_0, -0.5_1_6_1, -0.2_4_1_8, -0.4_0_7_2, -0.1_6_1_2, -0.0_6_3_3, -0.0_1_4_3]) # fmt: on self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3))
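A small sketch mirroring the forward-pass test above, using the same dummy Hub checkpoint:

import torch
from diffusers import VQModel

model = VQModel.from_pretrained("fusing/vqgan-dummy").eval()
torch.manual_seed(0)
sample = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
with torch.no_grad():
    reconstruction = model(sample).sample   # autoencode through the vector-quantized latent space
print(reconstruction.shape)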
"""simple docstring""" from __future__ import annotations from sys import maxsize from typing import Generic, TypeVar __A = TypeVar("T") def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> List[str]: return (position - 1) // 2 def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> str: return (2 * position) + 1 def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Optional[Any]: return (2 * position) + 2 class UpperCAmelCase (Generic[T] ): """simple docstring""" def __init__( self ): lowercase__: list[tuple[T, int]] = [] lowercase__: dict[T, int] = {} lowercase__: int = 0 def __len__( self ): return self.elements def __repr__( self ): return str(self.heap ) def _snake_case ( self ): # Check if the priority queue is empty return self.elements == 0 def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase ): # Add an element with given priority to the queue self.heap.append((elem, weight) ) lowercase__: int = self.elements self.elements += 1 self._bubble_up(__lowerCamelCase ) def _snake_case ( self ): # Remove and return the element with lowest weight (highest priority) if self.elements > 1: self._swap_nodes(0 , self.elements - 1 ) lowercase__: Optional[Any] = self.heap.pop() del self.position_map[elem] self.elements -= 1 if self.elements > 0: lowercase__: str = self.heap[0] self._bubble_down(__lowerCamelCase ) return elem def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase ): # Update the weight of the given key lowercase__: Optional[Any] = self.position_map[elem] lowercase__: Any = (elem, weight) if position > 0: lowercase__: Any = get_parent_position(__lowerCamelCase ) lowercase__: Tuple = self.heap[parent_position] if parent_weight > weight: self._bubble_up(__lowerCamelCase ) else: self._bubble_down(__lowerCamelCase ) else: self._bubble_down(__lowerCamelCase ) def _snake_case ( self , _UpperCAmelCase ): # Place a node at the proper position (upward movement) [to be used internally # only] lowercase__: Optional[int] = self.position_map[elem] if curr_pos == 0: return None lowercase__: Optional[Any] = get_parent_position(__lowerCamelCase ) lowercase__: Any = self.heap[curr_pos] lowercase__: str = self.heap[parent_position] if parent_weight > weight: self._swap_nodes(__lowerCamelCase , __lowerCamelCase ) return self._bubble_up(__lowerCamelCase ) return None def _snake_case ( self , _UpperCAmelCase ): # Place a node at the proper position (downward movement) [to be used # internally only] lowercase__: Any = self.position_map[elem] lowercase__: Optional[int] = self.heap[curr_pos] lowercase__: Optional[int] = get_child_left_position(__lowerCamelCase ) lowercase__: List[str] = get_child_right_position(__lowerCamelCase ) if child_left_position < self.elements and child_right_position < self.elements: lowercase__: str = self.heap[child_left_position] lowercase__: List[str] = self.heap[child_right_position] if child_right_weight < child_left_weight and child_right_weight < weight: self._swap_nodes(__lowerCamelCase , __lowerCamelCase ) return self._bubble_down(__lowerCamelCase ) if child_left_position < self.elements: lowercase__: int = self.heap[child_left_position] if child_left_weight < weight: self._swap_nodes(__lowerCamelCase , __lowerCamelCase ) return self._bubble_down(__lowerCamelCase ) else: return None if child_right_position < self.elements: lowercase__: int = self.heap[child_right_position] if child_right_weight < weight: self._swap_nodes(__lowerCamelCase , __lowerCamelCase ) return self._bubble_down(__lowerCamelCase ) return None def _snake_case ( self , _UpperCAmelCase , 
_UpperCAmelCase ): # Swap the nodes at the given positions lowercase__: str = self.heap[nodea_pos][0] lowercase__: List[Any] = self.heap[nodea_pos][0] lowercase__: Dict = ( self.heap[nodea_pos], self.heap[nodea_pos], ) lowercase__: Tuple = nodea_pos lowercase__: int = nodea_pos class UpperCAmelCase (Generic[T] ): """simple docstring""" def __init__( self ): lowercase__: dict[T, dict[T, int]] = {} lowercase__: int = 0 def __repr__( self ): return str(self.connections ) def __len__( self ): return self.nodes def _snake_case ( self , _UpperCAmelCase ): # Add a node in the graph if it is not in the graph if node not in self.connections: lowercase__: Optional[int] = {} self.nodes += 1 def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): # Add an edge between 2 nodes in the graph self.add_node(__lowerCamelCase ) self.add_node(__lowerCamelCase ) lowercase__: Optional[int] = weight lowercase__: str = weight def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , ) -> Any: lowercase__: dict[T, int] = {node: maxsize for node in graph.connections} lowercase__: dict[T, T | None] = {node: None for node in graph.connections} lowercase__: MinPriorityQueue[T] = MinPriorityQueue() for node, weight in dist.items(): priority_queue.push(UpperCamelCase__ , UpperCamelCase__ ) if priority_queue.is_empty(): return dist, parent # initialization lowercase__: List[str] = priority_queue.extract_min() lowercase__: Tuple = 0 for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: lowercase__: Tuple = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(UpperCamelCase__ , dist[neighbour] ) lowercase__: int = node # running prim's algorithm while not priority_queue.is_empty(): lowercase__: Optional[Any] = priority_queue.extract_min() for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: lowercase__: Optional[Any] = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(UpperCamelCase__ , dist[neighbour] ) lowercase__: Any = node return dist, parent
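A hedged usage sketch of the Prim's-algorithm helper above. The names GraphUndirectedWeighted, add_edge and prims_algo are assumptions recovering the mangled class, method and function names in the snippet, chosen to match the reference implementation it appears to derive from:

graph = GraphUndirectedWeighted()
graph.add_edge(1, 2, 3)
graph.add_edge(2, 3, 10)
graph.add_edge(3, 4, 5)
graph.add_edge(1, 4, 6)

dist, parent = prims_algo(graph)
print(parent)   # maps each node to its parent in the minimum spanning tree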
import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: lowerCAmelCase__ = None lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} lowerCAmelCase__ = { 'vocab_file': { 'facebook/mbart-large-en-ro': ( 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model' ), 'facebook/mbart-large-cc25': ( 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model' ), }, 'tokenizer_file': { 'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json', 'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json', }, } lowerCAmelCase__ = { 'facebook/mbart-large-en-ro': 10_24, 'facebook/mbart-large-cc25': 10_24, } # fmt: off lowerCAmelCase__ = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN'] class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"] __SCREAMING_SNAKE_CASE = MBartTokenizer __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="<s>" , __lowerCamelCase="</s>" , __lowerCamelCase="</s>" , __lowerCamelCase="<s>" , __lowerCamelCase="<unk>" , __lowerCamelCase="<pad>" , __lowerCamelCase="<mask>" , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , **__lowerCamelCase , ) -> Optional[int]: # Mask token behave like a normal word, i.e. include the space before it _A : List[str] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase) else mask_token super().__init__( vocab_file=__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , **__lowerCamelCase , ) _A : Union[str, Any] = vocab_file _A : int = False if not self.vocab_file else True _A : Optional[int] = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens]) self.add_special_tokens({"additional_special_tokens": _additional_special_tokens}) _A : Union[str, Any] = { lang_code: self.convert_tokens_to_ids(__lowerCamelCase) for lang_code in FAIRSEQ_LANGUAGE_CODES } _A : Optional[int] = src_lang if src_lang is not None else "en_XX" _A : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang) _A : int = tgt_lang self.set_src_lang_special_tokens(self._src_lang) @property def _lowerCamelCase ( self) -> str: return self._src_lang @src_lang.setter def _lowerCamelCase ( self , __lowerCamelCase) -> None: _A : Dict = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]: _A : List[str] = [self.sep_token_id] _A : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) -> Dict: if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model") _A : str = src_lang _A : Any = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase) _A : Tuple = self.convert_tokens_to_ids(__lowerCamelCase) _A : Dict = tgt_lang_id return inputs def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = "en_XX" , __lowerCamelCase = None , __lowerCamelCase = "ro_RO" , **__lowerCamelCase , ) -> BatchEncoding: _A : Any = src_lang _A : int = tgt_lang return super().prepare_seqaseq_batch(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self) -> List[str]: return self.set_src_lang_special_tokens(self.src_lang) def _lowerCamelCase ( self) -> List[Any]: return self.set_tgt_lang_special_tokens(self.tgt_lang) def _lowerCamelCase ( self , __lowerCamelCase) -> None: _A : int = self.convert_tokens_to_ids(__lowerCamelCase) _A : int = [] _A : List[str] = [self.eos_token_id, self.cur_lang_code] _A : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens) _A : str = self.convert_ids_to_tokens(self.suffix_tokens) _A : List[Any] = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , ) def _lowerCamelCase ( self , __lowerCamelCase) -> None: _A : Optional[int] = self.convert_tokens_to_ids(__lowerCamelCase) _A : List[Any] = [] _A : str = [self.eos_token_id, self.cur_lang_code] _A : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens) _A : int = self.convert_ids_to_tokens(self.suffix_tokens) _A : str = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , ) def 
_lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer.") if not os.path.isdir(__lowerCamelCase): logger.error(F"Vocabulary path ({save_directory}) should be a directory.") return _A : int = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(__lowerCamelCase): copyfile(self.vocab_file , __lowerCamelCase) return (out_vocab_file,)
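A brief usage sketch; MBartTokenizerFast is assumed to be the public name of the fast tokenizer defined above, and the checkpoint comes from the pretrained maps at the top of the file:

from transformers import MBartTokenizerFast

tokenizer = MBartTokenizerFast.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
# For mBART the source language code is appended as a suffix token after </s>.
print(batch["input_ids"][0][-1].item() == tokenizer.convert_tokens_to_ids("en_XX"))   # True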
import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class UpperCAmelCase : '''simple docstring''' @staticmethod def UpperCamelCase_ ( *A : Dict ,**A : Optional[int] ): pass @is_pipeline_test @require_vision @require_timm @require_torch class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' snake_case_ = MODEL_FOR_OBJECT_DETECTION_MAPPING def UpperCamelCase_ ( self : int ,A : str ,A : Any ,A : str ): __A = ObjectDetectionPipeline(model=__lowerCamelCase ,image_processor=__lowerCamelCase ) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def UpperCamelCase_ ( self : List[Any] ,A : Optional[Any] ,A : List[str] ): __A = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" ,threshold=0.0 ) self.assertGreater(len(__lowerCamelCase ) ,0 ) for detected_object in outputs: self.assertEqual( __lowerCamelCase ,{ "score": ANY(__lowerCamelCase ), "label": ANY(__lowerCamelCase ), "box": {"xmin": ANY(__lowerCamelCase ), "ymin": ANY(__lowerCamelCase ), "xmax": ANY(__lowerCamelCase ), "ymax": ANY(__lowerCamelCase )}, } ,) import datasets __A = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" ,"image" ,split="test" ) __A = [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ), "http://images.cocodataset.org/val2017/000000039769.jpg", # RGBA dataset[0]["file"], # LA dataset[1]["file"], # L dataset[2]["file"], ] __A = object_detector(__lowerCamelCase ,threshold=0.0 ) self.assertEqual(len(__lowerCamelCase ) ,len(__lowerCamelCase ) ) for outputs in batch_outputs: self.assertGreater(len(__lowerCamelCase ) ,0 ) for detected_object in outputs: self.assertEqual( __lowerCamelCase ,{ "score": ANY(__lowerCamelCase ), "label": ANY(__lowerCamelCase ), "box": {"xmin": ANY(__lowerCamelCase ), "ymin": ANY(__lowerCamelCase ), "xmax": ANY(__lowerCamelCase ), "ymax": ANY(__lowerCamelCase )}, } ,) @require_tf @unittest.skip("Object detection not implemented in TF" ) def UpperCamelCase_ ( self : int ): pass @require_torch def UpperCamelCase_ ( self : Any ): __A = "hf-internal-testing/tiny-detr-mobilenetsv3" __A = AutoModelForObjectDetection.from_pretrained(__lowerCamelCase ) __A = AutoFeatureExtractor.from_pretrained(__lowerCamelCase ) __A = ObjectDetectionPipeline(model=__lowerCamelCase ,feature_extractor=__lowerCamelCase ) __A = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" ,threshold=0.0 ) self.assertEqual( nested_simplify(__lowerCamelCase ,decimals=4 ) ,[ {"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 1_59, "ymin": 1_20, "xmax": 4_80, "ymax": 3_59}}, {"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 1_59, "ymin": 1_20, "xmax": 4_80, "ymax": 3_59}}, ] ,) __A = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ,threshold=0.0 ,) self.assertEqual( nested_simplify(__lowerCamelCase ,decimals=4 ) ,[ [ {"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 1_59, "ymin": 1_20, "xmax": 4_80, "ymax": 3_59}}, {"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 1_59, "ymin": 1_20, "xmax": 4_80, "ymax": 3_59}}, ], [ {"score": 0.33_76, 
"label": "LABEL_0", "box": {"xmin": 1_59, "ymin": 1_20, "xmax": 4_80, "ymax": 3_59}}, {"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 1_59, "ymin": 1_20, "xmax": 4_80, "ymax": 3_59}}, ], ] ,) @require_torch @slow def UpperCamelCase_ ( self : Any ): __A = "facebook/detr-resnet-50" __A = AutoModelForObjectDetection.from_pretrained(__lowerCamelCase ) __A = AutoFeatureExtractor.from_pretrained(__lowerCamelCase ) __A = ObjectDetectionPipeline(model=__lowerCamelCase ,feature_extractor=__lowerCamelCase ) __A = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" ) self.assertEqual( nested_simplify(__lowerCamelCase ,decimals=4 ) ,[ {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 1_75, "ymax": 1_17}}, {"score": 0.99_60, "label": "remote", "box": {"xmin": 3_33, "ymin": 72, "xmax": 3_68, "ymax": 1_87}}, {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_39, "ymax": 4_73}}, {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}}, ] ,) __A = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) self.assertEqual( nested_simplify(__lowerCamelCase ,decimals=4 ) ,[ [ {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 1_75, "ymax": 1_17}}, {"score": 0.99_60, "label": "remote", "box": {"xmin": 3_33, "ymin": 72, "xmax": 3_68, "ymax": 1_87}}, {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_39, "ymax": 4_73}}, {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}}, ], [ {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 1_75, "ymax": 1_17}}, {"score": 0.99_60, "label": "remote", "box": {"xmin": 3_33, "ymin": 72, "xmax": 3_68, "ymax": 1_87}}, {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_39, "ymax": 4_73}}, {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}}, ], ] ,) @require_torch @slow def UpperCamelCase_ ( self : Any ): __A = "facebook/detr-resnet-50" __A = pipeline("object-detection" ,model=__lowerCamelCase ) __A = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" ) self.assertEqual( nested_simplify(__lowerCamelCase ,decimals=4 ) ,[ {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 1_75, "ymax": 1_17}}, {"score": 0.99_60, "label": "remote", "box": {"xmin": 3_33, "ymin": 72, "xmax": 3_68, "ymax": 1_87}}, {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_39, "ymax": 4_73}}, {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}}, ] ,) __A = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) self.assertEqual( nested_simplify(__lowerCamelCase ,decimals=4 ) ,[ [ {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 1_75, "ymax": 1_17}}, {"score": 0.99_60, "label": "remote", "box": {"xmin": 3_33, 
"ymin": 72, "xmax": 3_68, "ymax": 1_87}}, {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_39, "ymax": 4_73}}, {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}}, ], [ {"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 1_75, "ymax": 1_17}}, {"score": 0.99_60, "label": "remote", "box": {"xmin": 3_33, "ymin": 72, "xmax": 3_68, "ymax": 1_87}}, {"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_39, "ymax": 4_73}}, {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}}, ], ] ,) @require_torch @slow def UpperCamelCase_ ( self : Dict ): __A = 0.99_85 __A = "facebook/detr-resnet-50" __A = pipeline("object-detection" ,model=__lowerCamelCase ) __A = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" ,threshold=__lowerCamelCase ) self.assertEqual( nested_simplify(__lowerCamelCase ,decimals=4 ) ,[ {"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}}, {"score": 0.99_87, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}}, ] ,) @require_torch @require_pytesseract @slow def UpperCamelCase_ ( self : Union[str, Any] ): __A = "Narsil/layoutlmv3-finetuned-funsd" __A = 0.99_93 __A = pipeline("object-detection" ,model=__lowerCamelCase ,threshold=__lowerCamelCase ) __A = object_detector( "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" ) self.assertEqual( nested_simplify(__lowerCamelCase ,decimals=4 ) ,[ {"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 2_94, "ymin": 2_54, "xmax": 3_43, "ymax": 2_64}}, {"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 2_94, "ymin": 2_54, "xmax": 3_43, "ymax": 2_64}}, ] ,)
15
import json import os from typing import Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { 'vocab_file': 'vocab.json', 'tokenizer_config_file': 'tokenizer_config.json', 'merges_file': 'merges.txt', } lowerCAmelCase__ = { 'vocab_file': { 'facebook/s2t-wav2vec2-large-en-de': ( 'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json' ), }, 'tokenizer_config_file': { 'facebook/s2t-wav2vec2-large-en-de': ( 'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json' ), }, 'merges_file': { 'facebook/s2t-wav2vec2-large-en-de': ( 'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt' ), }, } lowerCAmelCase__ = '</w>' lowerCAmelCase__ = '@@ ' def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] ): _A : Optional[int] = set() _A : Optional[Any] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _A : List[Any] = char return pairs # Speech2Text2 has no max input length lowerCAmelCase__ = {'facebook/s2t-wav2vec2-large-en-de': 10_24} class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"] def __init__( self , __lowerCamelCase , __lowerCamelCase="<s>" , __lowerCamelCase="<pad>" , __lowerCamelCase="</s>" , __lowerCamelCase="<unk>" , __lowerCamelCase=False , __lowerCamelCase=None , **__lowerCamelCase , ) -> Optional[Any]: super().__init__( unk_token=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , pad_token=__lowerCamelCase , do_lower_case=__lowerCamelCase , **__lowerCamelCase , ) _A : Dict = do_lower_case with open(__lowerCamelCase , encoding="utf-8") as vocab_handle: _A : Optional[int] = json.load(__lowerCamelCase) _A : Optional[Any] = {v: k for k, v in self.encoder.items()} if merges_file is None: logger.info(F"No merges files provided. 
{self.__class__.__name__} can only be used for decoding.") _A : Optional[Any] = None _A : Tuple = None else: with open(__lowerCamelCase , encoding="utf-8") as merges_handle: _A : Optional[int] = merges_handle.read().split("\n")[:-1] _A : Union[str, Any] = [tuple(merge.split()[:2]) for merge in merges] _A : Optional[int] = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase)))) _A : List[Any] = {} @property def _lowerCamelCase ( self) -> int: return len(self.decoder) def _lowerCamelCase ( self) -> Dict: return dict(self.encoder , **self.added_tokens_encoder) def _lowerCamelCase ( self , __lowerCamelCase) -> Dict: _A : Tuple = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,) if token in self.cache: return self.cache[token] _A : int = get_pairs(__lowerCamelCase) if not pairs: return token while True: _A : Any = min(__lowerCamelCase , key=lambda __lowerCamelCase: self.bpe_ranks.get(__lowerCamelCase , float("inf"))) if bigram not in self.bpe_ranks: break _A , _A : Optional[int] = bigram _A : int = [] _A : str = 0 while i < len(__lowerCamelCase): try: _A : str = word.index(__lowerCamelCase , __lowerCamelCase) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) _A : str = j if word[i] == first and i < len(__lowerCamelCase) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 _A : List[str] = tuple(__lowerCamelCase) _A : List[str] = new_word if len(__lowerCamelCase) == 1: break else: _A : List[Any] = get_pairs(__lowerCamelCase) _A : Tuple = " ".join(__lowerCamelCase) if word == "\n " + BPE_TOKEN_MERGES: _A : List[str] = "\n" + BPE_TOKEN_MERGES if word.endswith(__lowerCamelCase): _A : int = word.replace(__lowerCamelCase , "") _A : int = word.replace(" " , __lowerCamelCase) _A : Union[str, Any] = word return word def _lowerCamelCase ( self , __lowerCamelCase) -> Optional[Any]: if self.bpe_ranks is None: raise ValueError( "This tokenizer was instantiated without a `merges.txt` file, so" " that it can only be used for decoding, not for encoding." 
"Make sure to provide `merges.txt` file at instantiation to enable " "encoding.") if self.do_lower_case: _A : List[Any] = text.lower() _A : Optional[int] = text.split() _A : List[str] = [] for token in text: if token: split_tokens.extend(list(self.bpe(__lowerCamelCase).split(" "))) return split_tokens def _lowerCamelCase ( self , __lowerCamelCase) -> int: return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token)) def _lowerCamelCase ( self , __lowerCamelCase) -> str: _A : List[str] = self.decoder.get(__lowerCamelCase , self.unk_token) return result def _lowerCamelCase ( self , __lowerCamelCase) -> str: _A : str = " ".join(__lowerCamelCase) # make sure @@ tokens are concatenated _A : int = "".join(string.split(__lowerCamelCase)) return string def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]: if not os.path.isdir(__lowerCamelCase): logger.error(F"Vocabulary path ({save_directory}) should be a directory") return _A : Any = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) _A : Any = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]) with open(__lowerCamelCase , "w" , encoding="utf-8") as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase) + "\n") _A : Union[str, Any] = 0 if self.bpe_ranks is None: return (vocab_file,) with open(__lowerCamelCase , "w" , encoding="utf-8") as writer: for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase: kv[1]): if index != token_index: logger.warning( F"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!") _A : Optional[int] = token_index writer.write(" ".join(__lowerCamelCase) + "\n") index += 1 return (vocab_file, merges_file)
11
0
from collections.abc import Sequence


def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Return the largest sum of any contiguous subarray of `arr` (Kadane's algorithm)."""
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        # Either extend the running subarray or restart it at the current element.
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()

    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
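# The `allow_empty_subarrays` flag above only changes the answer when every element is negative,
# so a short illustration may help. This is a hypothetical session that assumes the cleaned-up
# `max_subarray_sum` defined above.
all_negative = [-5, -2, -8]

# Default behaviour: the best non-empty subarray is just the largest single element.
print(max_subarray_sum(all_negative))                               # -2

# Allowing the empty subarray puts a floor of 0 on the result.
print(max_subarray_sum(all_negative, allow_empty_subarrays=True))   # 0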
302
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
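# Why this is here: the config class above is easier to read next to a tiny usage sketch.
# Minimal, hypothetical example; it assumes the class is exported as `transformers.ViTMAEConfig`
# and consumed by `ViTMAEModel`, as in the upstream library.
from transformers import ViTMAEConfig, ViTMAEModel

config = ViTMAEConfig()          # defaults match the signature above
config.mask_ratio = 0.6          # mask 60% of patches instead of the default 75%

model = ViTMAEModel(config)      # randomly initialised model built from the config
print(model.config.mask_ratio)   # 0.6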
11
0
"""simple docstring""" import warnings from ...utils import logging from .image_processing_deformable_detr import DeformableDetrImageProcessor A__ : Tuple = logging.get_logger(__name__) class lowercase__ ( snake_case__ ): def __init__( self : Dict , *snake_case__ : Tuple , **snake_case__ : Optional[Any] ): warnings.warn( "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use DeformableDetrImageProcessor instead." , __lowerCamelCase , ) super().__init__(*__lowerCamelCase , **__lowerCamelCase )
144
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
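# Why this is here: the lazy-import table above only lists names; a one-line usage sketch shows
# what it ultimately exposes. Hypothetical example, assuming the public `transformers` API and
# network access to the referenced checkpoints.
from transformers import SpeechEncoderDecoderModel

# Pair a pretrained speech encoder with a pretrained text decoder; cross-attention weights are
# newly initialised and would normally be fine-tuned afterwards.
model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
    "facebook/wav2vec2-base-960h", "bert-base-uncased"
)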
11
0
from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # noqa F403 else: from .scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
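# Why this is here: the module above only enumerates the available schedulers; a small sketch of
# how one is swapped into a pipeline may help. Hypothetical example, assuming the public
# `diffusers` API and the named checkpoint (weights are downloaded on first use).
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")

# Any scheduler can be rebuilt from another scheduler's config, which is how they are exchanged.
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)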
193
# HF Trainer benchmarking tool # # This tool can be used to run and compare multiple dimensions of the HF Trainers args. # # It then prints a report once in github format with all the information that needs to be shared # with others and second time in a console-friendly format, so it's easier to use for tuning things up. # # The main idea is: # # ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \ # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \ # --target-metric-key train_samples_per_second # # The variations can be any command line argument that you want to compare and not just dtype as in # the example. # # --variations allows you to compare variations in multiple dimensions. # # as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6 # times adding one of: # # 1. --tf32 0 --fp16 0 # 2. --tf32 0 --fp16 1 # 3. --tf32 0 --bf16 1 # 4. --tf32 1 --fp16 0 # 5. --tf32 1 --fp16 1 # 6. --tf32 1 --bf16 1 # # and print the results. This is just a cartesian product - and more than 2 dimensions can be used. # # If you want to rely on defaults, this: # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' # is identical to this: # --variations '--tf32 0|--tf32 1' '|--fp16|--bf16' # # the leading empty variation in the 2nd dimension is a valid variation. # # So here we get the following 6 variations: # # 1. --tf32 0 # 2. --tf32 0 --fp16 # 3. --tf32 0 --bf16 # 4. --tf32 1 # 5. --tf32 1 --fp16 # 6. --tf32 1 --bf16 # # In this particular case we don't know what the default tf32 setting is as it's normally # pytorch-version dependent). That's why it's best to do an explicit setting of each variation: # `--tf32 0|--tf32 1` # # Here is a full example of a train: # # CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \ # --base-cmd \ # ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \ # --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \ # --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \ # --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \ # --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \ # --source_prefix "translate English to Romanian: " --warmup_steps 50 \ # --max_train_samples 20000 --dataloader_num_workers 2 ' \ # --target-metric-key train_samples_per_second --repeat-times 1 --variations \ # '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \ # --repeat-times 1 --base-variation '--tf32 0' # # and here is a possible output: # # # | Variation | Train | Diff | Train | # | | samples | % | loss | # | | per | | | # | | second | | | # |:----------------|----------:|-------:|--------:| # | --tf32 0 | 285.11 | 0 | 2.51 | # | --tf32 1 | 342.09 | 20 | 2.51 | # | --fp16 --tf32 0 | 423.49 | 49 | 2.51 | # | --fp16 --tf32 1 | 423.13 | 48 | 2.51 | # | --bf16 --tf32 0 | 416.80 | 46 | 2.52 | # | --bf16 --tf32 1 | 415.87 | 46 | 2.52 | # # # So you can quickly compare the different outcomes. # # Typically running each experiment once is enough, but if the environment is unstable you can # re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results. 
# # By default it'll use the lowest result as the base line to use as 100% and then compare the rest to # it as can be seen from the table above, but you can also specify which combination is the one to use as # the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0' # # --target-metric-key is there to tell the program which metrics to compare - the different metric keys are # inside output_dir/all_results.json. e.g., to measure eval performance instead of train use: # --target-metric-key eval_samples_per_second # but of course you will need to adjust the --base-cmd value in the example to perform evaluation as # well (as currently it doesn't) # import argparse import datetime import io import itertools import json import math import os import platform import re import shlex import subprocess import sys from pathlib import Path from statistics import fmean import pandas as pd import torch from tqdm import tqdm import transformers lowerCAmelCase__ = float('nan') class lowerCAmelCase__ : '''simple docstring''' def __init__( self , __lowerCamelCase) -> Optional[Any]: _A : List[Any] = sys.stdout _A : str = open(__lowerCamelCase , "a") def __getattr__( self , __lowerCamelCase) -> List[str]: return getattr(self.stdout , __lowerCamelCase) def _lowerCamelCase ( self , __lowerCamelCase) -> str: self.stdout.write(__lowerCamelCase) # strip tqdm codes self.file.write(re.sub(r"^.*\r" , "" , __lowerCamelCase , 0 , re.M)) def _UpperCAmelCase (UpperCamelCase__ : str=80 , UpperCamelCase__ : Tuple=False ): _A : Tuple = [] # deal with critical env vars _A : Dict = ["CUDA_VISIBLE_DEVICES"] for key in env_keys: _A : Optional[int] = os.environ.get(UpperCamelCase__ , UpperCamelCase__ ) if val is not None: cmd.append(f"{key}={val}" ) # python executable (not always needed if the script is executable) _A : Optional[int] = sys.executable if full_python_path else sys.executable.split("/" )[-1] cmd.append(UpperCamelCase__ ) # now the normal args cmd += list(map(shlex.quote , sys.argv ) ) # split up into up to MAX_WIDTH lines with shell multi-line escapes _A : Tuple = [] _A : Dict = "" while len(UpperCamelCase__ ) > 0: current_line += f"{cmd.pop(0 )} " if len(UpperCamelCase__ ) == 0 or len(UpperCamelCase__ ) + len(cmd[0] ) + 1 > max_width - 1: lines.append(UpperCamelCase__ ) _A : Union[str, Any] = "" return "\\\n".join(UpperCamelCase__ ) def _UpperCAmelCase (UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple ): # unwrap multi-line input _A : Union[str, Any] = re.sub(r"[\\\n]+" , " " , args.base_cmd ) # remove --output_dir if any and set our own _A : int = re.sub("--output_dir\s+[^\s]+" , "" , args.base_cmd ) args.base_cmd += f" --output_dir {output_dir}" # ensure we have --overwrite_output_dir _A : int = re.sub("--overwrite_output_dir\s+" , "" , args.base_cmd ) args.base_cmd += " --overwrite_output_dir" return [sys.executable] + shlex.split(args.base_cmd ) def _UpperCAmelCase (UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] ): # Enable to debug everything but the run itself, to do it fast and see the progress. 
# This is useful for debugging the output formatting quickly - we can remove it later once # everybody is happy with the output if 0: import random from time import sleep sleep(0 ) return dict( {k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 1_00.2, 55.66_66, 2_22.22_22_22_22] )} , ) _A : Dict = subprocess.run(UpperCamelCase__ , capture_output=UpperCamelCase__ , text=UpperCamelCase__ ) if verbose: print("STDOUT" , result.stdout ) print("STDERR" , result.stderr ) # save the streams _A : Tuple = variation.replace(" " , "-" ) with open(Path(UpperCamelCase__ ) / f"log.{prefix}.stdout.txt" , "w" ) as f: f.write(result.stdout ) with open(Path(UpperCamelCase__ ) / f"log.{prefix}.stderr.txt" , "w" ) as f: f.write(result.stderr ) if result.returncode != 0: if verbose: print("failed" ) return {target_metric_key: nan} with io.open(f"{output_dir}/all_results.json" , "r" , encoding="utf-8" ) as f: _A : List[str] = json.load(UpperCamelCase__ ) # filter out just the keys we want return {k: v for k, v in metrics.items() if k in metric_keys} def _UpperCAmelCase (UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Any , ): _A : Union[str, Any] = [] _A : Optional[int] = [] _A : Any = f"{id}: {variation:<{longest_variation_len}}" _A : Dict = f"{preamble}: " _A : Union[str, Any] = set(report_metric_keys + [target_metric_key] ) for i in tqdm(range(UpperCamelCase__ ) , desc=UpperCamelCase__ , leave=UpperCamelCase__ ): _A : Optional[Any] = process_run_single( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) _A : Optional[Any] = single_run_metrics[target_metric_key] if not math.isnan(UpperCamelCase__ ): metrics.append(UpperCamelCase__ ) results.append(UpperCamelCase__ ) outcome += "✓" else: outcome += "✘" _A : str = f"\33[2K\r{outcome}" if len(UpperCamelCase__ ) > 0: _A : List[str] = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()} _A : Any = round(mean_metrics[target_metric_key] , 2 ) _A : Tuple = f"{outcome} {mean_target}" if len(UpperCamelCase__ ) > 1: results_str += f" {tuple(round(UpperCamelCase__ , 2 ) for x in results )}" print(UpperCamelCase__ ) _A : Optional[int] = variation return mean_metrics else: print(UpperCamelCase__ ) return {variation_key: variation, target_metric_key: nan} def _UpperCAmelCase (): _A : int = torch.cuda.get_device_properties(torch.device("cuda" ) ) return f"\nDatetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n" def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict ): _A : Any = pd.DataFrame(UpperCamelCase__ ) _A : List[str] = "variation" _A : List[Any] = "diff_%" _A : int = nan if base_variation is not None and len(df[df[variation_key] == base_variation] ): # this may still return nan _A : int = df.loc[df[variation_key] == base_variation][target_metric_key].item() if math.isnan(UpperCamelCase__ ): # as a fallback, use the minimal value as the 
sentinel _A : List[str] = df.loc[df[target_metric_key] != nan][target_metric_key].min() # create diff column if possible if not math.isnan(UpperCamelCase__ ): _A : Optional[Any] = df.apply( lambda UpperCamelCase__ : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value ) if not math.isnan(r[target_metric_key] ) else 0 , axis="columns" , ) # re-order columns _A : Union[str, Any] = [variation_key, target_metric_key, diff_key, *report_metric_keys] _A : Any = df.reindex(UpperCamelCase__ , axis="columns" ) # reorder cols # capitalize _A : Tuple = df.rename(str.capitalize , axis="columns" ) # make the cols as narrow as possible _A : List[str] = df.rename(lambda UpperCamelCase__ : c.replace("_" , "<br>" ) , axis="columns" ) _A : Union[str, Any] = df.rename(lambda UpperCamelCase__ : c.replace("_" , "\n" ) , axis="columns" ) _A : Optional[int] = ["", "Copy between the cut-here-lines and paste as is to github or a forum"] report += ["----------8<-----------------8<--------"] report += ["*** Results:", df_github.to_markdown(index=UpperCamelCase__ , floatfmt=".2f" )] report += ["```"] report += ["*** Setup:", get_versions()] report += ["*** The benchmark command line was:", get_original_command()] report += ["```"] report += ["----------8<-----------------8<--------"] report += ["*** Results (console):", df_console.to_markdown(index=UpperCamelCase__ , floatfmt=".2f" )] print("\n\n".join(UpperCamelCase__ ) ) def _UpperCAmelCase (): _A : int = argparse.ArgumentParser() parser.add_argument( "--base-cmd" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="Base cmd" , ) parser.add_argument( "--variations" , default=UpperCamelCase__ , type=UpperCamelCase__ , nargs="+" , required=UpperCamelCase__ , help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'" , ) parser.add_argument( "--base-variation" , default=UpperCamelCase__ , type=UpperCamelCase__ , help="Baseline variation to compare to. if None the minimal target value will be used to compare against" , ) parser.add_argument( "--target-metric-key" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second" , ) parser.add_argument( "--report-metric-keys" , default="" , type=UpperCamelCase__ , help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. 
Use a single argument e.g., 'train_loss train_samples" , ) parser.add_argument( "--repeat-times" , default=1 , type=UpperCamelCase__ , help="How many times to re-run each variation - an average will be reported" , ) parser.add_argument( "--output_dir" , default="output_benchmark" , type=UpperCamelCase__ , help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked" , ) parser.add_argument( "--verbose" , default=UpperCamelCase__ , action="store_true" , help="Whether to show the outputs of each run or just the benchmark progress" , ) _A : int = parser.parse_args() _A : Union[str, Any] = args.output_dir Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ ) _A : Tuple = get_base_command(UpperCamelCase__ , UpperCamelCase__ ) # split each dimension into its --foo variations _A : Dict = [list(map(str.strip , re.split(r"\|" , UpperCamelCase__ ) ) ) for x in args.variations] # build a cartesian product of dimensions and convert those back into cmd-line arg strings, # while stripping white space for inputs that were empty _A : Union[str, Any] = list(map(str.strip , map(" ".join , itertools.product(*UpperCamelCase__ ) ) ) ) _A : Union[str, Any] = max(len(UpperCamelCase__ ) for x in variations ) # split wanted keys _A : str = args.report_metric_keys.split() # capture prints into a log file for convenience _A : Optional[int] = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S' )}.txt" print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt" ) print(f"and this script's output is also piped into {report_fn}" ) _A : Tuple = Tee(UpperCamelCase__ ) print(f"\n*** Running {len(UpperCamelCase__ )} benchmarks:" ) print(f"Base command: {' '.join(UpperCamelCase__ )}" ) _A : str = "variation" _A : Union[str, Any] = [] for id, variation in enumerate(tqdm(UpperCamelCase__ , desc="Total completion: " , leave=UpperCamelCase__ ) ): _A : Dict = base_cmd + variation.split() results.append( process_run( id + 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , args.target_metric_key , UpperCamelCase__ , args.repeat_times , UpperCamelCase__ , args.verbose , ) ) process_results(UpperCamelCase__ , args.target_metric_key , UpperCamelCase__ , args.base_variation , UpperCamelCase__ ) if __name__ == "__main__": main()
11
0
'''simple docstring''' import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class A_ ( unittest.TestCase ): def lowercase ( self : Any ): _UpperCAmelCase = 1_0 def lowercase ( self : Any ): _UpperCAmelCase = [1, 2, 3, 4] _UpperCAmelCase = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual(truncate_or_pad(__lowerCamelCase , self.block_size , 0 ) , __lowerCamelCase ) def lowercase ( self : Optional[int] ): _UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0] _UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0] self.assertEqual(truncate_or_pad(__lowerCamelCase , self.block_size , 0 ) , __lowerCamelCase ) def lowercase ( self : Dict ): _UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0, 1_1, 1_2, 1_3] _UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0] self.assertEqual(truncate_or_pad(__lowerCamelCase , self.block_size , 0 ) , __lowerCamelCase ) def lowercase ( self : str ): _UpperCAmelCase = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this." _UpperCAmelCase = process_story(__lowerCamelCase ) self.assertEqual(__lowerCamelCase , [] ) def lowercase ( self : Dict ): _UpperCAmelCase = "" _UpperCAmelCase = process_story(__lowerCamelCase ) self.assertEqual(__lowerCamelCase , [] ) self.assertEqual(__lowerCamelCase , [] ) def lowercase ( self : Optional[int] ): _UpperCAmelCase = ( "It was the year of Our Lord one thousand seven hundred and " "seventy-five\n\nSpiritual revelations were conceded to England " "at that favoured period, as at this.\n@highlight\n\nIt was the best of times" ) _UpperCAmelCase = process_story(__lowerCamelCase ) _UpperCAmelCase = [ "It was the year of Our Lord one thousand seven hundred and seventy-five.", "Spiritual revelations were conceded to England at that favoured period, as at this.", ] self.assertEqual(__lowerCamelCase , __lowerCamelCase ) _UpperCAmelCase = ["It was the best of times."] self.assertEqual(__lowerCamelCase , __lowerCamelCase ) def lowercase ( self : int ): _UpperCAmelCase = torch.tensor([1, 2, 3, 4] ) _UpperCAmelCase = torch.tensor([1, 1, 1, 1] ) np.testing.assert_array_equal(build_mask(__lowerCamelCase , 0 ).numpy() , expected.numpy() ) def lowercase ( self : Tuple ): _UpperCAmelCase = torch.tensor([1, 2, 3, 4, 2_3, 2_3, 2_3] ) _UpperCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(__lowerCamelCase , 2_3 ).numpy() , expected.numpy() ) def lowercase ( self : Any ): _UpperCAmelCase = torch.tensor([8, 2, 3, 4, 1, 1, 1] ) _UpperCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(__lowerCamelCase , 1 ).numpy() , expected.numpy() ) def lowercase ( self : str ): _UpperCAmelCase = 1_0_1 _UpperCAmelCase = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_0_1, 5, 6], [1, 1_0_1, 3, 4, 1_0_1, 6]] ) _UpperCAmelCase = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] ) _UpperCAmelCase = compute_token_type_ids(__lowerCamelCase , __lowerCamelCase ) np.testing.assert_array_equal(__lowerCamelCase , __lowerCamelCase )
22
import logging import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional, Union import datasets import numpy as np import torch from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.trainer_utils import get_last_checkpoint from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.31.0') lowerCAmelCase__ = logging.getLogger(__name__) @dataclass class lowerCAmelCase__ : '''simple docstring''' __SCREAMING_SNAKE_CASE = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "Pretrained config name or path if not the same as model_name"}) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , ) __SCREAMING_SNAKE_CASE = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) } , ) @dataclass class lowerCAmelCase__ : '''simple docstring''' __SCREAMING_SNAKE_CASE = field(default=a , metadata={"help": "The input training data file (a text file)."}) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "Overwrite the cached training and evaluation sets"}) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "The number of processes to use for the preprocessing."} , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={ "help": ( "The maximum total input sequence length after tokenization. If passed, sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={ "help": ( "Whether to pad all samples to the maximum sentence length. " "If False, will pad the samples dynamically when batching to the maximum length in the batch. More " "efficient on GPU but very bad for TPU." ) } , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) } , ) def _lowerCamelCase ( self) -> int: if self.train_file is not None: _A : Optional[int] = self.train_file.split(".")[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." 
if self.validation_file is not None: _A : Dict = self.validation_file.split(".")[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." @dataclass class lowerCAmelCase__ : '''simple docstring''' __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None def __call__( self , __lowerCamelCase) -> str: _A : List[Any] = "label" if "label" in features[0].keys() else "labels" _A : Any = [feature.pop(__lowerCamelCase) for feature in features] _A : Optional[int] = len(__lowerCamelCase) _A : int = len(features[0]["input_ids"]) _A : Tuple = [ [{k: v[i] for k, v in feature.items()} for i in range(__lowerCamelCase)] for feature in features ] _A : str = list(chain(*__lowerCamelCase)) _A : Tuple = self.tokenizer.pad( __lowerCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , ) # Un-flatten _A : Optional[int] = {k: v.view(__lowerCamelCase , __lowerCamelCase , -1) for k, v in batch.items()} # Add back labels _A : Optional[int] = torch.tensor(__lowerCamelCase , dtype=torch.intaa) return batch def _UpperCAmelCase (): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _A : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _A , _A , _A : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _A , _A , _A : Union[str, Any] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_swag" , UpperCamelCase__ , UpperCamelCase__ ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _A : int = training_args.get_process_log_level() logger.setLevel(UpperCamelCase__ ) datasets.utils.logging.set_verbosity(UpperCamelCase__ ) transformers.utils.logging.set_verbosity(UpperCamelCase__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" ) logger.info(f"Training/evaluation parameters {training_args}" ) # Detecting last checkpoint. _A : List[Any] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _A : Optional[int] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." 
) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.train_file is not None or data_args.validation_file is not None: _A : List[str] = {} if data_args.train_file is not None: _A : Optional[int] = data_args.train_file if data_args.validation_file is not None: _A : Tuple = data_args.validation_file _A : Union[str, Any] = data_args.train_file.split("." )[-1] _A : List[str] = load_dataset( UpperCamelCase__ , data_files=UpperCamelCase__ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) else: # Downloading and loading the swag dataset from the hub. _A : Union[str, Any] = load_dataset( "swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _A : Optional[Any] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) _A : Optional[Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) _A : List[Any] = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # When using your own dataset or a different dataset from swag, you will probably need to change this. _A : str = [f"ending{i}" for i in range(4 )] _A : Union[str, Any] = "sent1" _A : str = "sent2" if data_args.max_seq_length is None: _A : Any = tokenizer.model_max_length if max_seq_length > 1024: logger.warning( "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value" " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can" " override this default with `--block_size xxx`." 
) _A : Optional[Any] = 1024 else: if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the" f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." ) _A : int = min(data_args.max_seq_length , tokenizer.model_max_length ) # Preprocessing the datasets. def preprocess_function(UpperCamelCase__ : List[Any] ): _A : List[Any] = [[context] * 4 for context in examples[context_name]] _A : Any = examples[question_header_name] _A : Union[str, Any] = [ [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(UpperCamelCase__ ) ] # Flatten out _A : Dict = list(chain(*UpperCamelCase__ ) ) _A : List[Any] = list(chain(*UpperCamelCase__ ) ) # Tokenize _A : str = tokenizer( UpperCamelCase__ , UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding="max_length" if data_args.pad_to_max_length else False , ) # Un-flatten return {k: [v[i : i + 4] for i in range(0 , len(UpperCamelCase__ ) , 4 )] for k, v in tokenized_examples.items()} if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset" ) _A : Optional[int] = raw_datasets["train"] if data_args.max_train_samples is not None: _A : Union[str, Any] = min(len(UpperCamelCase__ ) , data_args.max_train_samples ) _A : Any = train_dataset.select(range(UpperCamelCase__ ) ) with training_args.main_process_first(desc="train dataset map pre-processing" ): _A : Optional[int] = train_dataset.map( UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset" ) _A : Optional[int] = raw_datasets["validation"] if data_args.max_eval_samples is not None: _A : str = min(len(UpperCamelCase__ ) , data_args.max_eval_samples ) _A : Dict = eval_dataset.select(range(UpperCamelCase__ ) ) with training_args.main_process_first(desc="validation dataset map pre-processing" ): _A : List[str] = eval_dataset.map( UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) # Data collator _A : str = ( default_data_collator if data_args.pad_to_max_length else DataCollatorForMultipleChoice(tokenizer=UpperCamelCase__ , pad_to_multiple_of=8 if training_args.fpaa else None ) ) # Metric def compute_metrics(UpperCamelCase__ : Tuple ): _A , _A : List[str] = eval_predictions _A : Optional[int] = np.argmax(UpperCamelCase__ , axis=1 ) return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()} # Initialize our Trainer _A : List[str] = Trainer( model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=UpperCamelCase__ , data_collator=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , ) # Training if training_args.do_train: _A : Any = None if training_args.resume_from_checkpoint is not None: _A : Optional[int] = training_args.resume_from_checkpoint elif last_checkpoint is not None: _A : int = last_checkpoint _A : Any = trainer.train(resume_from_checkpoint=UpperCamelCase__ ) trainer.save_model() # Saves the tokenizer too for easy upload _A : Optional[int] = train_result.metrics _A : Tuple = ( 
data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCamelCase__ ) ) _A : Tuple = min(UpperCamelCase__ , len(UpperCamelCase__ ) ) trainer.log_metrics("train" , UpperCamelCase__ ) trainer.save_metrics("train" , UpperCamelCase__ ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***" ) _A : List[Any] = trainer.evaluate() _A : int = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(UpperCamelCase__ ) _A : Optional[Any] = min(UpperCamelCase__ , len(UpperCamelCase__ ) ) trainer.log_metrics("eval" , UpperCamelCase__ ) trainer.save_metrics("eval" , UpperCamelCase__ ) _A : Tuple = { "finetuned_from": model_args.model_name_or_path, "tasks": "multiple-choice", "dataset_tags": "swag", "dataset_args": "regular", "dataset": "SWAG", "language": "en", } if training_args.push_to_hub: trainer.push_to_hub(**UpperCamelCase__ ) else: trainer.create_model_card(**UpperCamelCase__ ) def _UpperCAmelCase (UpperCamelCase__ : Optional[int] ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
11
0
'''simple docstring''' import sys from collections import defaultdict class _snake_case : def __init__( self): UpperCAmelCase__ : Any = [] def snake_case__ ( self , _lowerCamelCase): return self.node_position[vertex] def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase): UpperCAmelCase__ : Tuple = pos def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase): if start > size // 2 - 1: return else: if 2 * start + 2 >= size: UpperCAmelCase__ : List[str] = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: UpperCAmelCase__ : Dict = 2 * start + 1 else: UpperCAmelCase__ : Optional[int] = 2 * start + 2 if heap[smallest_child] < heap[start]: UpperCAmelCase__ : List[str] = heap[smallest_child], positions[smallest_child] UpperCAmelCase__ : Optional[int] = ( heap[start], positions[start], ) UpperCAmelCase__ : List[Any] = temp, tempa UpperCAmelCase__ : Optional[Any] = self.get_position(positions[smallest_child]) self.set_position( positions[smallest_child] , self.get_position(positions[start])) self.set_position(positions[start] , __lowerCamelCase) self.top_to_bottom(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase): UpperCAmelCase__ : Union[str, Any] = position[index] while index != 0: UpperCAmelCase__ : List[str] = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2) if val < heap[parent]: UpperCAmelCase__ : Union[str, Any] = heap[parent] UpperCAmelCase__ : str = position[parent] self.set_position(position[parent] , __lowerCamelCase) else: UpperCAmelCase__ : Tuple = val UpperCAmelCase__ : Any = temp self.set_position(__lowerCamelCase , __lowerCamelCase) break UpperCAmelCase__ : Dict = parent else: UpperCAmelCase__ : str = val UpperCAmelCase__ : str = temp self.set_position(__lowerCamelCase , 0) def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase): UpperCAmelCase__ : str = len(__lowerCamelCase) // 2 - 1 for i in range(__lowerCamelCase , -1 , -1): self.top_to_bottom(__lowerCamelCase , __lowerCamelCase , len(__lowerCamelCase) , __lowerCamelCase) def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase): UpperCAmelCase__ : Tuple = positions[0] UpperCAmelCase__ : List[str] = sys.maxsize self.top_to_bottom(__lowerCamelCase , 0 , len(__lowerCamelCase) , __lowerCamelCase) return temp def _UpperCamelCase ( UpperCamelCase__ ): UpperCAmelCase__ : int = Heap() UpperCAmelCase__ : Dict = [0] * len(UpperCamelCase__ ) UpperCAmelCase__ : Union[str, Any] = [-1] * len(UpperCamelCase__ ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph UpperCAmelCase__ : int = [] # Heap of Distance of vertices from their neighboring vertex UpperCAmelCase__ : Dict = [] for vertex in range(len(UpperCamelCase__ ) ): distance_tv.append(sys.maxsize ) positions.append(UpperCamelCase__ ) heap.node_position.append(UpperCamelCase__ ) UpperCAmelCase__ : Any = [] UpperCAmelCase__ : Dict = 1 UpperCAmelCase__ : int = sys.maxsize for neighbor, distance in adjacency_list[0]: UpperCAmelCase__ : Union[str, Any] = 0 UpperCAmelCase__ : Optional[Any] = distance heap.heapify(UpperCamelCase__ , UpperCamelCase__ ) for _ in range(1 , len(UpperCamelCase__ ) ): UpperCAmelCase__ : Optional[Any] = heap.delete_minimum(UpperCamelCase__ , UpperCamelCase__ ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) UpperCAmelCase__ : Union[str, Any] = 1 for 
neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(UpperCamelCase__ )] ): UpperCAmelCase__ : Optional[Any] = distance heap.bottom_to_top( UpperCamelCase__ , heap.get_position(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ ) UpperCAmelCase__ : Optional[int] = vertex return tree_edges if __name__ == "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > __A =int(input('Enter number of edges: ').strip()) __A =defaultdict(list) for _ in range(edges_number): __A =[int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
163
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" , "False")) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , ) @pytest.mark.usefixtures("sm_env") @parameterized_class( [ { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6}, }, { "framework": "pytorch", "script": "run_ddp.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6}, }, { "framework": "tensorflow", "script": "run_tf_dist.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7}, }, ]) class lowerCAmelCase__ ( unittest.TestCase): '''simple docstring''' def _lowerCamelCase ( self) -> str: if self.framework == "pytorch": subprocess.run( F"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding="utf-8" , check=__lowerCamelCase , ) assert hasattr(self , "env") def _lowerCamelCase ( self , __lowerCamelCase) -> Tuple: _A : Dict = F"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}" # distributed data settings _A : Optional[Any] = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=__lowerCamelCase , instance_count=__lowerCamelCase , instance_type=self.instance_type , debugger_hook_config=__lowerCamelCase , hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=__lowerCamelCase , py_version="py36" , ) def _lowerCamelCase ( self , __lowerCamelCase) -> Optional[Any]: TrainingJobAnalytics(__lowerCamelCase).export_csv(F"{self.env.test_path}/{job_name}_metrics.csv") @parameterized.expand([(2,)]) def _lowerCamelCase ( self , __lowerCamelCase) -> Any: # create estimator _A : Union[str, Any] = self.create_estimator(__lowerCamelCase) # run training estimator.fit() # result dataframe _A : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe() # extract kpis _A : List[Any] = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"]) _A : Dict = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"]) # get train time from SageMaker job, this includes starting, preprocessing, stopping _A : Optional[Any] = ( Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds" , 9_9_9_9_9_9) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy) assert all(t <= self.results["eval_loss"] for t in eval_loss) # dump tests result into json file to share in PR with open(F"{estimator.latest_training_job.name}.json" , "w") as 
outfile: json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , __lowerCamelCase)
11
0
"""simple docstring""" __A : Optional[Any] = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5] __A : int = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5] __A : Dict = { 0: '''Sunday''', 1: '''Monday''', 2: '''Tuesday''', 3: '''Wednesday''', 4: '''Thursday''', 5: '''Friday''', 6: '''Saturday''', } def lowercase ( __snake_case : int , __snake_case : int , __snake_case : int ): assert len(str(UpperCamelCase__ ) ) > 2, "year should be in YYYY format" assert 1 <= month <= 1_2, "month should be between 1 to 12" assert 1 <= day <= 3_1, "day should be between 1 to 31" # Doomsday algorithm: lowercase_ : List[Any] = year // 1_0_0 lowercase_ : List[str] = (5 * (century % 4) + 2) % 7 lowercase_ : str = year % 1_0_0 lowercase_ : List[Any] = centurian % 1_2 lowercase_ : Tuple = ( (centurian // 1_2) + centurian_m + (centurian_m // 4) + century_anchor ) % 7 lowercase_ : Optional[Any] = ( DOOMSDAY_NOT_LEAP[month - 1] if (year % 4 != 0) or (centurian == 0 and (year % 4_0_0) == 0) else DOOMSDAY_LEAP[month - 1] ) lowercase_ : Tuple = (dooms_day + day - day_anchor) % 7 return WEEK_DAY_NAMES[week_day] if __name__ == "__main__": import doctest doctest.testmod()
33
import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = ["image_processor", "tokenizer"] __SCREAMING_SNAKE_CASE = "OwlViTImageProcessor" __SCREAMING_SNAKE_CASE = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , **__lowerCamelCase) -> Union[str, Any]: _A : int = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , __lowerCamelCase , ) _A : List[Any] = kwargs.pop("feature_extractor") _A : Dict = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`.") if tokenizer is None: raise ValueError("You need to specify a `tokenizer`.") super().__init__(__lowerCamelCase , __lowerCamelCase) def __call__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="max_length" , __lowerCamelCase="np" , **__lowerCamelCase) -> Any: if text is None and query_images is None and images is None: raise ValueError( "You have to specify at least one text or query image or image. All three cannot be none.") if text is not None: if isinstance(__lowerCamelCase , __lowerCamelCase) or (isinstance(__lowerCamelCase , __lowerCamelCase) and not isinstance(text[0] , __lowerCamelCase)): _A : Union[str, Any] = [self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)] elif isinstance(__lowerCamelCase , __lowerCamelCase) and isinstance(text[0] , __lowerCamelCase): _A : Optional[Any] = [] # Maximum number of queries across batch _A : str = max([len(__lowerCamelCase) for t in text]) # Pad all batch samples to max number of text queries for t in text: if len(__lowerCamelCase) != max_num_queries: _A : Optional[int] = t + [" "] * (max_num_queries - len(__lowerCamelCase)) _A : List[Any] = self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase) encodings.append(__lowerCamelCase) else: raise TypeError("Input text should be a string, a list of strings or a nested list of strings") if return_tensors == "np": _A : Tuple = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0) _A : Optional[Any] = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp _A : Optional[int] = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0) _A : Optional[int] = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0) elif return_tensors == "pt" and is_torch_available(): import torch _A : Optional[Any] = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0) _A : Union[str, Any] = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf _A : Any = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0) _A : Tuple = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0) else: raise ValueError("Target return tensor type could not be returned") _A : Optional[Any] = BatchEncoding() _A 
: Tuple = input_ids _A : Dict = attention_mask if query_images is not None: _A : Optional[Any] = BatchEncoding() _A : List[str] = self.image_processor( __lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase).pixel_values _A : Union[str, Any] = query_pixel_values if images is not None: _A : int = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase) if text is not None and images is not None: _A : Tuple = image_features.pixel_values return encoding elif query_images is not None and images is not None: _A : int = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**__lowerCamelCase) , tensor_type=__lowerCamelCase) def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> str: return self.image_processor.post_process(*__lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> List[str]: return self.image_processor.post_process_object_detection(*__lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> Optional[int]: return self.image_processor.post_process_image_guided_detection(*__lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> int: return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> Optional[int]: return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase) @property def _lowerCamelCase ( self) -> int: warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __lowerCamelCase , ) return self.image_processor_class @property def _lowerCamelCase ( self) -> List[str]: warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __lowerCamelCase , ) return self.image_processor
11
0
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_gpta import GPTaTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation A : List[Any] = logging.get_logger(__name__) A : Optional[int] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} A : Union[str, Any] = { "vocab_file": { "gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json", "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json", "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json", "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json", "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json", }, "merges_file": { "gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt", "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt", "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt", "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt", "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt", }, "tokenizer_file": { "gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json", "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json", "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json", "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json", "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json", }, } A : Tuple = { "gpt2": 1_0_2_4, "gpt2-medium": 1_0_2_4, "gpt2-large": 1_0_2_4, "gpt2-xl": 1_0_2_4, "distilgpt2": 1_0_2_4, } class _UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' __UpperCAmelCase : str =VOCAB_FILES_NAMES __UpperCAmelCase : Any =PRETRAINED_VOCAB_FILES_MAP __UpperCAmelCase : Optional[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCAmelCase : Any =["""input_ids""", """attention_mask"""] __UpperCAmelCase : Optional[int] =GPTaTokenizer def __init__( self , __a=None , __a=None , __a=None , __a="<|endoftext|>" , __a="<|endoftext|>" , __a="<|endoftext|>" , __a=False , **__a , ): super().__init__( __lowerCamelCase , __lowerCamelCase , tokenizer_file=__lowerCamelCase , unk_token=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , ) __lowerCAmelCase = kwargs.pop("add_bos_token" , __lowerCamelCase ) __lowerCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , __lowerCamelCase ) != add_prefix_space: __lowerCAmelCase = getattr(__lowerCamelCase , pre_tok_state.pop("type" ) ) __lowerCAmelCase = add_prefix_space __lowerCAmelCase = pre_tok_class(**__lowerCamelCase ) __lowerCAmelCase = add_prefix_space def snake_case ( self , *__a , **__a ): __lowerCAmelCase = kwargs.get("is_split_into_words" , __lowerCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." 
) return super()._batch_encode_plus(*__lowerCamelCase , **__lowerCamelCase ) def snake_case ( self , *__a , **__a ): __lowerCAmelCase = kwargs.get("is_split_into_words" , __lowerCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._encode_plus(*__lowerCamelCase , **__lowerCamelCase ) def snake_case ( self , __a , __a = None ): __lowerCAmelCase = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase ) return tuple(__lowerCamelCase ) def snake_case ( self , __a ): __lowerCAmelCase = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) + [self.eos_token_id] ) if len(__lowerCamelCase ) > self.model_max_length: __lowerCAmelCase = input_ids[-self.model_max_length :] return input_ids
57
import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class lowerCAmelCase__ ( unittest.TestCase): '''simple docstring''' def _lowerCamelCase ( self , __lowerCamelCase) -> Dict: for model_result in results.values(): for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"]): _A : Optional[int] = model_result["result"][batch_size][sequence_length] self.assertIsNotNone(__lowerCamelCase) def _lowerCamelCase ( self) -> int: _A : Optional[int] = "sshleifer/tiny-gpt2" _A : int = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : List[str] = PyTorchBenchmark(__lowerCamelCase) _A : Optional[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCamelCase ( self) -> Dict: _A : int = "sgugger/tiny-distilbert-classification" _A : str = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , only_pretrain_model=__lowerCamelCase , ) _A : Dict = PyTorchBenchmark(__lowerCamelCase) _A : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCamelCase ( self) -> Optional[Any]: _A : Tuple = "sshleifer/tiny-gpt2" _A : Optional[Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , torchscript=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : Union[str, Any] = PyTorchBenchmark(__lowerCamelCase) _A : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) @unittest.skipIf(torch_device == "cpu" , "Cant do half precision") def _lowerCamelCase ( self) -> int: _A : Any = "sshleifer/tiny-gpt2" _A : Tuple = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , fpaa=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : Any = PyTorchBenchmark(__lowerCamelCase) _A : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCamelCase ( self) -> Any: _A : Union[str, Any] = "sshleifer/tiny-gpt2" _A : Any = AutoConfig.from_pretrained(__lowerCamelCase) # set architectures equal to `None` _A : Dict = None _A : Any = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : Union[str, Any] = PyTorchBenchmark(__lowerCamelCase , configs=[config]) _A : int = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCamelCase ( self) -> int: _A : List[Any] = "sshleifer/tiny-gpt2" _A : int = PyTorchBenchmarkArguments( models=[MODEL_ID] , 
training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : Optional[Any] = PyTorchBenchmark(__lowerCamelCase) _A : int = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) @unittest.skipIf(torch_device == "cpu" , "Can't do half precision") def _lowerCamelCase ( self) -> Optional[Any]: _A : Any = "sshleifer/tiny-gpt2" _A : Tuple = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=__lowerCamelCase , multi_process=__lowerCamelCase , ) _A : List[Any] = PyTorchBenchmark(__lowerCamelCase) _A : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def _lowerCamelCase ( self) -> str: _A : List[str] = "sshleifer/tiny-gpt2" _A : Union[str, Any] = AutoConfig.from_pretrained(__lowerCamelCase) _A : Any = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : Optional[Any] = PyTorchBenchmark(__lowerCamelCase , configs=[config]) _A : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCamelCase ( self) -> int: _A : Tuple = "sshleifer/tinier_bart" _A : Optional[Any] = AutoConfig.from_pretrained(__lowerCamelCase) _A : Optional[int] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : Dict = PyTorchBenchmark(__lowerCamelCase , configs=[config]) _A : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCamelCase ( self) -> str: _A : List[Any] = "sshleifer/tiny-gpt2" _A : Optional[Any] = AutoConfig.from_pretrained(__lowerCamelCase) _A : Optional[Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : List[str] = PyTorchBenchmark(__lowerCamelCase , configs=[config]) _A : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def _lowerCamelCase ( self) -> int: _A : int = "sshleifer/tinier_bart" _A : str = AutoConfig.from_pretrained(__lowerCamelCase) _A : Tuple = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : Tuple = PyTorchBenchmark(__lowerCamelCase , configs=[config]) _A : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def _lowerCamelCase ( self) -> Dict: _A : List[str] = "sshleifer/tiny-gpt2" with tempfile.TemporaryDirectory() as tmp_dir: _A : Optional[Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , save_to_csv=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__lowerCamelCase , 
"inf_time.csv") , train_memory_csv_file=os.path.join(__lowerCamelCase , "train_mem.csv") , inference_memory_csv_file=os.path.join(__lowerCamelCase , "inf_mem.csv") , train_time_csv_file=os.path.join(__lowerCamelCase , "train_time.csv") , env_info_csv_file=os.path.join(__lowerCamelCase , "env.csv") , multi_process=__lowerCamelCase , ) _A : Tuple = PyTorchBenchmark(__lowerCamelCase) benchmark.run() self.assertTrue(Path(os.path.join(__lowerCamelCase , "inf_time.csv")).exists()) self.assertTrue(Path(os.path.join(__lowerCamelCase , "train_time.csv")).exists()) self.assertTrue(Path(os.path.join(__lowerCamelCase , "inf_mem.csv")).exists()) self.assertTrue(Path(os.path.join(__lowerCamelCase , "train_mem.csv")).exists()) self.assertTrue(Path(os.path.join(__lowerCamelCase , "env.csv")).exists()) def _lowerCamelCase ( self) -> int: _A : Dict = "sshleifer/tiny-gpt2" def _check_summary_is_not_empty(__lowerCamelCase): self.assertTrue(hasattr(__lowerCamelCase , "sequential")) self.assertTrue(hasattr(__lowerCamelCase , "cumulative")) self.assertTrue(hasattr(__lowerCamelCase , "current")) self.assertTrue(hasattr(__lowerCamelCase , "total")) with tempfile.TemporaryDirectory() as tmp_dir: _A : Union[str, Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__lowerCamelCase , "log.txt") , log_print=__lowerCamelCase , trace_memory_line_by_line=__lowerCamelCase , multi_process=__lowerCamelCase , ) _A : Optional[int] = PyTorchBenchmark(__lowerCamelCase) _A : Dict = benchmark.run() _check_summary_is_not_empty(result.inference_summary) _check_summary_is_not_empty(result.train_summary) self.assertTrue(Path(os.path.join(__lowerCamelCase , "log.txt")).exists())
11
0
"""simple docstring""" import json import os import tempfile import datasets from utils import generate_example_dataset, get_duration a :List[str] = 50_000 a :Union[str, Any] = 5_000 a ,a :Dict = os.path.split(__file__) a :Dict = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json")) @get_duration def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> Any: for i in range(UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ : Optional[int] = dataset[i] @get_duration def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]: for i in range(0 , len(UpperCamelCase__ ) , UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ : int = dataset[i : i + batch_size] @get_duration def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[str]: with dataset.formatted_as(type=UpperCamelCase__ ): for i in range(UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ : Optional[int] = dataset[i] @get_duration def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> int: with dataset.formatted_as(type=UpperCamelCase__ ): for i in range(0 , UpperCamelCase__ , UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ : Dict = dataset[i : i + batch_size] def _lowercase ( ) -> int: SCREAMING_SNAKE_CASE__ : Optional[int] = {"num examples": SPEED_TEST_N_EXAMPLES} SCREAMING_SNAKE_CASE__ : List[Any] = [ (read, {"length": SMALL_TEST}), (read, {"length": SPEED_TEST_N_EXAMPLES}), (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}), (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}), (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}), (read_formatted, {"type": "numpy", "length": SMALL_TEST}), (read_formatted, {"type": "pandas", "length": SMALL_TEST}), (read_formatted, {"type": "torch", "length": SMALL_TEST}), (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}), (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}), (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}), ] SCREAMING_SNAKE_CASE__ : str = [ (read, {"length": SMALL_TEST}), (read, {"length": SPEED_TEST_N_EXAMPLES}), (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}), (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}), (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}), (read_formatted, {"type": "numpy", "length": SMALL_TEST}), (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}), (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}), ] with tempfile.TemporaryDirectory() as tmp_dir: print("""generating dataset""" ) SCREAMING_SNAKE_CASE__ : List[Any] = datasets.Features( {"""list""": datasets.Sequence(datasets.Value("""float32""" ) ), """numbers""": datasets.Value("""float32""" )} ) SCREAMING_SNAKE_CASE__ : Dict = generate_example_dataset( os.path.join(UpperCamelCase__ , """dataset.arrow""" ) , UpperCamelCase__ , num_examples=UpperCamelCase__ , seq_shapes={"""list""": (100,)} , ) print("""first set of iterations""" ) for func, kwargs in functions: print(func.__name__ , str(UpperCamelCase__ ) ) SCREAMING_SNAKE_CASE__ : List[str] = func(UpperCamelCase__ , **UpperCamelCase__ ) print("""shuffling dataset""" ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = dataset.shuffle() print("""Second set of iterations (after shuffling""" ) for func, kwargs in functions_shuffled: print("""shuffled """ , func.__name__ , str(UpperCamelCase__ ) ) SCREAMING_SNAKE_CASE__ : Dict = func( 
UpperCamelCase__ , **UpperCamelCase__ ) with open(UpperCamelCase__ , """wb""" ) as f: f.write(json.dumps(UpperCamelCase__ ).encode("""utf-8""" ) ) if __name__ == "__main__": # useful to run the profiler benchmark_iterating()
132
import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: lowerCAmelCase__ = None lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} lowerCAmelCase__ = { 'vocab_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model' ), }, 'tokenizer_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json' ), }, } lowerCAmelCase__ = { 'facebook/nllb-large-en-ro': 10_24, 'facebook/nllb-200-distilled-600M': 10_24, } # fmt: off lowerCAmelCase__ = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn'] class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE = 
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"] __SCREAMING_SNAKE_CASE = NllbTokenizer __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="<s>" , __lowerCamelCase="</s>" , __lowerCamelCase="</s>" , __lowerCamelCase="<s>" , __lowerCamelCase="<unk>" , __lowerCamelCase="<pad>" , __lowerCamelCase="<mask>" , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=False , **__lowerCamelCase , ) -> Tuple: # Mask token behave like a normal word, i.e. include the space before it _A : Any = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase) else mask_token _A : Optional[int] = legacy_behaviour super().__init__( vocab_file=__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , legacy_behaviour=__lowerCamelCase , **__lowerCamelCase , ) _A : int = vocab_file _A : Optional[Any] = False if not self.vocab_file else True _A : Tuple = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. _additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens]) self.add_special_tokens({"additional_special_tokens": _additional_special_tokens}) _A : Union[str, Any] = { lang_code: self.convert_tokens_to_ids(__lowerCamelCase) for lang_code in FAIRSEQ_LANGUAGE_CODES } _A : Optional[int] = src_lang if src_lang is not None else "eng_Latn" _A : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang) _A : List[str] = tgt_lang self.set_src_lang_special_tokens(self._src_lang) @property def _lowerCamelCase ( self) -> str: return self._src_lang @src_lang.setter def _lowerCamelCase ( self , __lowerCamelCase) -> None: _A : Tuple = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]: _A : Tuple = [self.sep_token_id] _A : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) -> Optional[int]: if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model") _A : List[Any] = src_lang _A : Optional[int] = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase) _A : Tuple = self.convert_tokens_to_ids(__lowerCamelCase) _A : Tuple = tgt_lang_id return inputs def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = "eng_Latn" , 
__lowerCamelCase = None , __lowerCamelCase = "fra_Latn" , **__lowerCamelCase , ) -> BatchEncoding: _A : Tuple = src_lang _A : int = tgt_lang return super().prepare_seqaseq_batch(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self) -> str: return self.set_src_lang_special_tokens(self.src_lang) def _lowerCamelCase ( self) -> List[str]: return self.set_tgt_lang_special_tokens(self.tgt_lang) def _lowerCamelCase ( self , __lowerCamelCase) -> None: _A : Dict = self.convert_tokens_to_ids(__lowerCamelCase) if self.legacy_behaviour: _A : List[str] = [] _A : Dict = [self.eos_token_id, self.cur_lang_code] else: _A : Tuple = [self.cur_lang_code] _A : Optional[Any] = [self.eos_token_id] _A : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens) _A : int = self.convert_ids_to_tokens(self.suffix_tokens) _A : List[Any] = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , ) def _lowerCamelCase ( self , __lowerCamelCase) -> None: _A : Optional[Any] = self.convert_tokens_to_ids(__lowerCamelCase) if self.legacy_behaviour: _A : Tuple = [] _A : Any = [self.eos_token_id, self.cur_lang_code] else: _A : Union[str, Any] = [self.cur_lang_code] _A : str = [self.eos_token_id] _A : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens) _A : Dict = self.convert_ids_to_tokens(self.suffix_tokens) _A : Union[str, Any] = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , ) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer.") if not os.path.isdir(__lowerCamelCase): logger.error(F"Vocabulary path ({save_directory}) should be a directory.") return _A : Dict = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(__lowerCamelCase): copyfile(self.vocab_file , __lowerCamelCase) return (out_vocab_file,)
11
0
from __future__ import annotations

import numpy as np


def relu(vector: list[float]) -> np.ndarray:
    """Apply the rectified linear unit element-wise: max(0, x) for each entry."""
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0 0 5]
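A quick sanity check of the element-wise behaviour (a minimal sketch, assuming the corrected relu above): negative entries are clamped to zero and non-negative entries pass through unchanged.

import numpy as np

# ReLU(x) = max(0, x), applied element-wise by np.maximum
assert np.array_equal(np.maximum(0, np.array([-1, 0, 5])), np.array([0, 0, 5]))
assert np.array_equal(np.maximum(0, np.array([-2.5, 3.0])), np.array([0.0, 3.0]))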
156
# flake8: noqa # Lint as: python3 from typing import Dict, List, Optional, Type from .. import config from ..utils import logging from .formatting import ( ArrowFormatter, CustomFormatter, Formatter, PandasFormatter, PythonFormatter, TensorFormatter, format_table, query_table, ) from .np_formatter import NumpyFormatter lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = {} lowerCAmelCase__ = {} lowerCAmelCase__ = {} def _UpperCAmelCase (UpperCamelCase__ : type , UpperCamelCase__ : Optional[str] , UpperCamelCase__ : Optional[List[str]] = None , ): _A : Union[str, Any] = aliases if aliases is not None else [] if format_type in _FORMAT_TYPES: logger.warning( f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})" ) _A : Dict = formatter_cls for alias in set(aliases + [format_type] ): if alias in _FORMAT_TYPES_ALIASES: logger.warning( f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})" ) _A : Dict = format_type def _UpperCAmelCase (UpperCamelCase__ : Exception , UpperCamelCase__ : Optional[str] , UpperCamelCase__ : Optional[List[str]] = None ): _A : Union[str, Any] = aliases if aliases is not None else [] for alias in set(aliases + [format_type] ): _A : Union[str, Any] = unavailable_error # Here we define all the available formatting functions that can be used by `Dataset.set_format` _register_formatter(PythonFormatter, None, aliases=['python']) _register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow']) _register_formatter(NumpyFormatter, 'numpy', aliases=['np']) _register_formatter(PandasFormatter, 'pandas', aliases=['pd']) _register_formatter(CustomFormatter, 'custom') if config.TORCH_AVAILABLE: from .torch_formatter import TorchFormatter _register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch']) else: lowerCAmelCase__ = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.') _register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch']) if config.TF_AVAILABLE: from .tf_formatter import TFFormatter _register_formatter(TFFormatter, 'tensorflow', aliases=['tf']) else: lowerCAmelCase__ = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.') _register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf']) if config.JAX_AVAILABLE: from .jax_formatter import JaxFormatter _register_formatter(JaxFormatter, 'jax', aliases=[]) else: lowerCAmelCase__ = ValueError('JAX needs to be installed to be able to return JAX arrays.') _register_unavailable_formatter(_jax_error, 'jax', aliases=[]) def _UpperCAmelCase (UpperCamelCase__ : Optional[str] ): if format_type in _FORMAT_TYPES_ALIASES: return _FORMAT_TYPES_ALIASES[format_type] else: return format_type def _UpperCAmelCase (UpperCamelCase__ : Optional[str] , **UpperCamelCase__ : List[Any] ): _A : List[str] = get_format_type_from_alias(UpperCamelCase__ ) if format_type in _FORMAT_TYPES: return _FORMAT_TYPES[format_type](**UpperCamelCase__ ) if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE: raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type] else: raise ValueError( f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}'" )
11
0
"""simple docstring""" __A = { "a": "AAAAA", "b": "AAAAB", "c": "AAABA", "d": "AAABB", "e": "AABAA", "f": "AABAB", "g": "AABBA", "h": "AABBB", "i": "ABAAA", "j": "BBBAA", "k": "ABAAB", "l": "ABABA", "m": "ABABB", "n": "ABBAA", "o": "ABBAB", "p": "ABBBA", "q": "ABBBB", "r": "BAAAA", "s": "BAAAB", "t": "BAABA", "u": "BAABB", "v": "BBBAB", "w": "BABAA", "x": "BABAB", "y": "BABBA", "z": "BABBB", " ": " ", } __A = {value: key for key, value in encode_dict.items()} def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> int: lowercase__: Union[str, Any] = "" for letter in word.lower(): if letter.isalpha() or letter == " ": encoded += encode_dict[letter] else: raise Exception('''encode() accepts only letters of the alphabet and spaces''' ) return encoded def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Dict: if set(UpperCamelCase__ ) - {"A", "B", " "} != set(): raise Exception('''decode() accepts only \'A\', \'B\' and spaces''' ) lowercase__: Optional[int] = "" for word in coded.split(): while len(UpperCamelCase__ ) != 0: decoded += decode_dict[word[:5]] lowercase__: Optional[int] = word[5:] decoded += " " return decoded.strip() if __name__ == "__main__": from doctest import testmod testmod()
177
def trapezoidal_rule(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = trapezoidal_rule(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
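For reference, the quadrature implemented above is the composite trapezoidal rule

\int_a^b f(x)\,dx \approx \frac{h}{2}\Bigl[f(a) + 2\sum_{i=1}^{n-1} f(a + ih) + f(b)\Bigr], \qquad h = \frac{b - a}{n},

so with f(x) = x^2 on [0, 1] and 10 steps, main() prints a value close to 0.335 against the exact integral 1/3; the error shrinks as O(h^2) as the step count grows.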
11
0
import socket


def main() -> None:
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
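This client only works against a matching sender listening on the same host and port. A minimal counterpart sketch, assuming the same hard-coded port 12312 and a hypothetical source file name File_to_send (not part of the original script):

import socket

# Hypothetical sender: accepts one connection and streams a local file to it.
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((socket.gethostname(), 12312))
server.listen(1)
conn, addr = server.accept()
print(f"Connected to {addr}")
conn.recv(1024)  # consume the client's greeting
with open("File_to_send", "rb") as in_file:
    while chunk := in_file.read(1024):
        conn.send(chunk)
conn.close()
server.close()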
15
import copy import tempfile import unittest from huggingface_hub import HfFolder, delete_repo from parameterized import parameterized from requests.exceptions import HTTPError from transformers import AutoConfig, GenerationConfig from transformers.testing_utils import TOKEN, USER, is_staging_test class lowerCAmelCase__ ( unittest.TestCase): '''simple docstring''' @parameterized.expand([(None,), ("foo.json",)]) def _lowerCamelCase ( self , __lowerCamelCase) -> List[str]: _A : str = GenerationConfig( do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(__lowerCamelCase , config_name=__lowerCamelCase) _A : Tuple = GenerationConfig.from_pretrained(__lowerCamelCase , config_name=__lowerCamelCase) # Checks parameters that were specified self.assertEqual(loaded_config.do_sample , __lowerCamelCase) self.assertEqual(loaded_config.temperature , 0.7) self.assertEqual(loaded_config.length_penalty , 1.0) self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]]) # Checks parameters that were not specified (defaults) self.assertEqual(loaded_config.top_k , 5_0) self.assertEqual(loaded_config.max_length , 2_0) self.assertEqual(loaded_config.max_time , __lowerCamelCase) def _lowerCamelCase ( self) -> Optional[int]: _A : Optional[int] = AutoConfig.from_pretrained("gpt2") _A : int = GenerationConfig.from_model_config(__lowerCamelCase) _A : List[Any] = GenerationConfig() # The generation config has loaded a few non-default parameters from the model config self.assertNotEqual(__lowerCamelCase , __lowerCamelCase) # One of those parameters is eos_token_id -- check if it matches self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id) self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id) def _lowerCamelCase ( self) -> Optional[Any]: _A : Optional[Any] = GenerationConfig() _A : List[Any] = { "max_new_tokens": 1_0_2_4, "foo": "bar", } _A : List[str] = copy.deepcopy(__lowerCamelCase) _A : int = generation_config.update(**__lowerCamelCase) # update_kwargs was not modified (no side effects) self.assertEqual(__lowerCamelCase , __lowerCamelCase) # update_kwargs was used to update the config on valid attributes self.assertEqual(generation_config.max_new_tokens , 1_0_2_4) # `.update()` returns a dictionary of unused kwargs self.assertEqual(__lowerCamelCase , {"foo": "bar"}) def _lowerCamelCase ( self) -> Any: _A : int = GenerationConfig() _A : int = "bar" with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(__lowerCamelCase) _A : Any = GenerationConfig.from_pretrained(__lowerCamelCase) # update_kwargs was used to update the config on valid attributes self.assertEqual(new_config.foo , "bar") _A : Optional[Any] = GenerationConfig.from_model_config(__lowerCamelCase) assert not hasattr(__lowerCamelCase , "foo") # no new kwargs should be initialized if from config def _lowerCamelCase ( self) -> List[str]: _A : Union[str, Any] = GenerationConfig() self.assertEqual(default_config.temperature , 1.0) self.assertEqual(default_config.do_sample , __lowerCamelCase) self.assertEqual(default_config.num_beams , 1) _A : Optional[int] = GenerationConfig( do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) self.assertEqual(config.temperature , 0.7) self.assertEqual(config.do_sample , __lowerCamelCase) 
self.assertEqual(config.num_beams , 1) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(__lowerCamelCase) _A : Optional[int] = GenerationConfig.from_pretrained(__lowerCamelCase , temperature=1.0) self.assertEqual(loaded_config.temperature , 1.0) self.assertEqual(loaded_config.do_sample , __lowerCamelCase) self.assertEqual(loaded_config.num_beams , 1) # default value @is_staging_test class lowerCAmelCase__ ( unittest.TestCase): '''simple docstring''' @classmethod def _lowerCamelCase ( cls) -> Optional[int]: _A : Dict = TOKEN HfFolder.save_token(__lowerCamelCase) @classmethod def _lowerCamelCase ( cls) -> List[Any]: try: delete_repo(token=cls._token , repo_id="test-generation-config") except HTTPError: pass try: delete_repo(token=cls._token , repo_id="valid_org/test-generation-config-org") except HTTPError: pass def _lowerCamelCase ( self) -> Any: _A : Optional[int] = GenerationConfig( do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub("test-generation-config" , use_auth_token=self._token) _A : Union[str, Any] = GenerationConfig.from_pretrained(F"{USER}/test-generation-config") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase)) # Reset repo delete_repo(token=self._token , repo_id="test-generation-config") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( __lowerCamelCase , repo_id="test-generation-config" , push_to_hub=__lowerCamelCase , use_auth_token=self._token) _A : Optional[Any] = GenerationConfig.from_pretrained(F"{USER}/test-generation-config") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase)) def _lowerCamelCase ( self) -> Union[str, Any]: _A : Union[str, Any] = GenerationConfig( do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub("valid_org/test-generation-config-org" , use_auth_token=self._token) _A : int = GenerationConfig.from_pretrained("valid_org/test-generation-config-org") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase)) # Reset repo delete_repo(token=self._token , repo_id="valid_org/test-generation-config-org") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( __lowerCamelCase , repo_id="valid_org/test-generation-config-org" , push_to_hub=__lowerCamelCase , use_auth_token=self._token) _A : Optional[int] = GenerationConfig.from_pretrained("valid_org/test-generation-config-org") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
11
0
import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.framework import ops from transformers import GradientAccumulator, create_optimizer @require_tf class SCREAMING_SNAKE_CASE ( unittest.TestCase ): def UpperCamelCase_ ( self : Union[str, Any] , __lowercase : Any , __lowercase : Optional[int] , __lowercase : List[Any] ): '''simple docstring''' self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) ) for a, b in zip(__lowerCamelCase , __lowerCamelCase ): self.assertAlmostEqual(__lowerCamelCase , __lowerCamelCase , delta=__lowerCamelCase ) def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' __a = GradientAccumulator() accumulator([tf.constant([1.0, 2.0] )] ) accumulator([tf.constant([-2.0, 1.0] )] ) accumulator([tf.constant([-1.0, 2.0] )] ) with self.assertRaises(__lowerCamelCase ): accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] ) self.assertEqual(accumulator.step , 3 ) self.assertEqual(len(accumulator.gradients ) , 1 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 ) def UpperCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' __a = None ops.enable_eager_execution_internal() __a = tf.config.list_physical_devices("""CPU""" ) if len(__lowerCamelCase ) == 1: tf.config.set_logical_device_configuration( physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] ) __a = tf.config.list_logical_devices(device_type="""CPU""" ) __a = tf.distribute.MirroredStrategy(devices=devices[:2] ) with strategy.scope(): __a = GradientAccumulator() __a = tf.Variable([4.0, 3.0] ) __a = create_optimizer(5E-5 , 10 , 5 ) __a = tf.Variable([0.0, 0.0] , trainable=__lowerCamelCase ) def accumulate_on_replica(__lowercase : List[str] ): accumulator([gradient] ) def apply_on_replica(): optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) ) @tf.function def accumulate(__lowercase : Optional[int] , __lowercase : List[str] ): with strategy.scope(): __a = strategy.experimental_local_results(__lowerCamelCase ) local_variables[0].assign(__lowerCamelCase ) local_variables[1].assign(__lowerCamelCase ) strategy.run(__lowerCamelCase , args=(gradient_placeholder,) ) @tf.function def apply_grad(): with strategy.scope(): strategy.run(__lowerCamelCase ) def _check_local_values(__lowercase : List[str] , __lowercase : List[str] ): __a = strategy.experimental_local_results(accumulator._gradients[0] ) self.assertListAlmostEqual(values[0].value() , __lowerCamelCase , tol=1E-2 ) self.assertListAlmostEqual(values[1].value() , __lowerCamelCase , tol=1E-2 ) accumulate([1.0, 2.0] , [-1.0, 1.0] ) accumulate([3.0, -1.0] , [-1.0, -1.0] ) accumulate([-2.0, 2.0] , [3.0, -2.0] ) self.assertEqual(accumulator.step , 3 ) _check_local_values([2.0, 3.0] , [1.0, -2.0] ) apply_grad() self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) _check_local_values([0.0, 0.0] , [0.0, 0.0] )
302
import pickle import numpy as np from matplotlib import pyplot as plt class lowerCAmelCase__ : '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=0.2 , __lowerCamelCase=0.2) -> str: _A : Optional[int] = bp_numa _A : Dict = bp_numa _A : Tuple = bp_numa _A : List[str] = conva_get[:2] _A : Tuple = conva_get[2] _A : Optional[int] = size_pa _A : Optional[Any] = rate_w _A : Optional[Any] = rate_t _A : Union[str, Any] = [ np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0]) + 0.5) for i in range(self.conva[1]) ] _A : int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5) _A : Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5) _A : Any = -2 * np.random.rand(self.conva[1]) + 1 _A : Optional[int] = -2 * np.random.rand(self.num_bpa) + 1 _A : Optional[Any] = -2 * np.random.rand(self.num_bpa) + 1 def _lowerCamelCase ( self , __lowerCamelCase) -> Dict: # save model dict with pickle _A : Dict = { "num_bp1": self.num_bpa, "num_bp2": self.num_bpa, "num_bp3": self.num_bpa, "conv1": self.conva, "step_conv1": self.step_conva, "size_pooling1": self.size_poolinga, "rate_weight": self.rate_weight, "rate_thre": self.rate_thre, "w_conv1": self.w_conva, "wkj": self.wkj, "vji": self.vji, "thre_conv1": self.thre_conva, "thre_bp2": self.thre_bpa, "thre_bp3": self.thre_bpa, } with open(__lowerCamelCase , "wb") as f: pickle.dump(__lowerCamelCase , __lowerCamelCase) print(F"Model saved: {save_path}") @classmethod def _lowerCamelCase ( cls , __lowerCamelCase) -> Any: # read saved model with open(__lowerCamelCase , "rb") as f: _A : Any = pickle.load(__lowerCamelCase) # noqa: S301 _A : Optional[int] = model_dic.get("conv1") conv_get.append(model_dic.get("step_conv1")) _A : str = model_dic.get("size_pooling1") _A : List[str] = model_dic.get("num_bp1") _A : Union[str, Any] = model_dic.get("num_bp2") _A : List[Any] = model_dic.get("num_bp3") _A : Dict = model_dic.get("rate_weight") _A : List[Any] = model_dic.get("rate_thre") # create model instance _A : str = CNN(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) # modify model parameter _A : List[Any] = model_dic.get("w_conv1") _A : Union[str, Any] = model_dic.get("wkj") _A : str = model_dic.get("vji") _A : List[str] = model_dic.get("thre_conv1") _A : Optional[Any] = model_dic.get("thre_bp2") _A : Dict = model_dic.get("thre_bp3") return conv_ins def _lowerCamelCase ( self , __lowerCamelCase) -> Dict: return 1 / (1 + np.exp(-1 * x)) def _lowerCamelCase ( self , __lowerCamelCase) -> Union[str, Any]: return round(__lowerCamelCase , 3) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Union[str, Any]: # convolution process _A : Tuple = convs[0] _A : Union[str, Any] = convs[1] _A : List[Any] = np.shape(__lowerCamelCase)[0] # get the data slice of original image data, data_focus _A : Tuple = [] for i_focus in range(0 , size_data - size_conv + 1 , __lowerCamelCase): for j_focus in range(0 , size_data - size_conv + 1 , __lowerCamelCase): _A : Optional[int] = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(__lowerCamelCase) # calculate the feature map of every single kernel, and saved as list of matrix _A : Optional[Any] = [] _A : Optional[int] = int((size_data - size_conv) / conv_step + 1) for i_map in range(__lowerCamelCase): _A : Optional[int] = [] 
for i_focus in range(len(__lowerCamelCase)): _A : Any = ( np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map])) - thre_convs[i_map] ) featuremap.append(self.sig(__lowerCamelCase)) _A : Optional[Any] = np.asmatrix(__lowerCamelCase).reshape( __lowerCamelCase , __lowerCamelCase) data_featuremap.append(__lowerCamelCase) # expanding the data slice to One dimenssion _A : Optional[Any] = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(__lowerCamelCase)) _A : Dict = np.asarray(__lowerCamelCase) return focus_list, data_featuremap def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase="average_pool") -> Dict: # pooling process _A : Optional[Any] = len(featuremaps[0]) _A : str = int(size_map / size_pooling) _A : Optional[int] = [] for i_map in range(len(__lowerCamelCase)): _A : int = featuremaps[i_map] _A : Optional[int] = [] for i_focus in range(0 , __lowerCamelCase , __lowerCamelCase): for j_focus in range(0 , __lowerCamelCase , __lowerCamelCase): _A : str = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(__lowerCamelCase)) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(__lowerCamelCase)) _A : Tuple = np.asmatrix(__lowerCamelCase).reshape(__lowerCamelCase , __lowerCamelCase) featuremap_pooled.append(__lowerCamelCase) return featuremap_pooled def _lowerCamelCase ( self , __lowerCamelCase) -> Tuple: # expanding three dimension data to one dimension list _A : Tuple = [] for i in range(len(__lowerCamelCase)): _A : Union[str, Any] = np.shape(data[i]) _A : List[Any] = data[i].reshape(1 , shapes[0] * shapes[1]) _A : Optional[Any] = data_listed.getA().tolist()[0] data_expanded.extend(__lowerCamelCase) _A : Optional[Any] = np.asarray(__lowerCamelCase) return data_expanded def _lowerCamelCase ( self , __lowerCamelCase) -> Union[str, Any]: # expanding matrix to one dimension list _A : List[Any] = np.asarray(__lowerCamelCase) _A : Union[str, Any] = np.shape(__lowerCamelCase) _A : Dict = data_mat.reshape(1 , shapes[0] * shapes[1]) return data_expanded def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Optional[int]: _A : Dict = [] _A : Any = 0 for i_map in range(__lowerCamelCase): _A : Union[str, Any] = np.ones((size_map, size_map)) for i in range(0 , __lowerCamelCase , __lowerCamelCase): for j in range(0 , __lowerCamelCase , __lowerCamelCase): _A : List[Any] = pd_pool[ i_pool ] _A : Tuple = i_pool + 1 _A : Optional[Any] = np.multiply( __lowerCamelCase , np.multiply(out_map[i_map] , (1 - out_map[i_map]))) pd_all.append(__lowerCamelCase) return pd_all def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=bool) -> Union[str, Any]: # model traning print("----------------------Start Training-------------------------") print((" - - Shape: Train_Data ", np.shape(__lowerCamelCase))) print((" - - Shape: Teach_Data ", np.shape(__lowerCamelCase))) _A : Tuple = 0 _A : Dict = [] _A : Optional[Any] = 1_0_0_0_0 while rp < n_repeat and mse >= error_accuracy: _A : Union[str, Any] = 0 print(F"-------------Learning Time {rp}--------------") for p in range(len(__lowerCamelCase)): # print('------------Learning Image: %d--------------'%p) _A : str = np.asmatrix(datas_train[p]) _A : Union[str, Any] = np.asarray(datas_teach[p]) _A , _A : Any = self.convolute( __lowerCamelCase , 
self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) _A : Optional[Any] = self.pooling(__lowerCamelCase , self.size_poolinga) _A : Optional[int] = np.shape(__lowerCamelCase) _A : List[str] = self._expand(__lowerCamelCase) _A : Tuple = data_bp_input _A : int = np.dot(__lowerCamelCase , self.vji.T) - self.thre_bpa _A : List[Any] = self.sig(__lowerCamelCase) _A : Union[str, Any] = np.dot(__lowerCamelCase , self.wkj.T) - self.thre_bpa _A : List[str] = self.sig(__lowerCamelCase) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- _A : int = np.multiply( (data_teach - bp_outa) , np.multiply(__lowerCamelCase , (1 - bp_outa))) _A : Optional[Any] = np.multiply( np.dot(__lowerCamelCase , self.wkj) , np.multiply(__lowerCamelCase , (1 - bp_outa))) _A : Union[str, Any] = np.dot(__lowerCamelCase , self.vji) _A : Any = pd_i_all / (self.size_poolinga * self.size_poolinga) _A : Dict = pd_conva_pooled.T.getA().tolist() _A : Optional[Any] = self._calculate_gradient_from_pool( __lowerCamelCase , __lowerCamelCase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , ) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1]): _A : int = self._expand_mat(pd_conva_all[k_conv]) _A : Optional[int] = self.rate_weight * np.dot(__lowerCamelCase , __lowerCamelCase) _A : List[Any] = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0])) _A : Any = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv]) * self.rate_thre ) # all connected layer _A : Tuple = self.wkj + pd_k_all.T * bp_outa * self.rate_weight _A : int = self.vji + pd_j_all.T * bp_outa * self.rate_weight _A : Tuple = self.thre_bpa - pd_k_all * self.rate_thre _A : List[str] = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image _A : Optional[int] = np.sum(abs(data_teach - bp_outa)) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) _A : Any = rp + 1 _A : Dict = error_count / patterns all_mse.append(__lowerCamelCase) def draw_error(): _A : Optional[int] = [error_accuracy for i in range(int(n_repeat * 1.2))] plt.plot(__lowerCamelCase , "+-") plt.plot(__lowerCamelCase , "r--") plt.xlabel("Learning Times") plt.ylabel("All_mse") plt.grid(__lowerCamelCase , alpha=0.5) plt.show() print("------------------Training Complished---------------------") print((" - - Training epoch: ", rp, F" - - Mse: {mse:.6f}")) if draw_e: draw_error() return mse def _lowerCamelCase ( self , __lowerCamelCase) -> int: # model predict _A : Union[str, Any] = [] print("-------------------Start Testing-------------------------") print((" - - Shape: Test_Data ", np.shape(__lowerCamelCase))) for p in range(len(__lowerCamelCase)): _A : int = np.asmatrix(datas_test[p]) _A , _A : List[Any] = self.convolute( __lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) _A : str = self.pooling(__lowerCamelCase , self.size_poolinga) _A : Optional[int] = self._expand(__lowerCamelCase) _A : List[Any] = data_bp_input _A : Optional[int] = bp_outa * self.vji.T - self.thre_bpa _A : int = self.sig(__lowerCamelCase) _A : int = bp_outa * self.wkj.T - self.thre_bpa _A : Optional[int] = self.sig(__lowerCamelCase) produce_out.extend(bp_outa.getA().tolist()) _A : int = [list(map(self.do_round , __lowerCamelCase)) for each in produce_out] return np.asarray(__lowerCamelCase) def _lowerCamelCase ( self , __lowerCamelCase) -> Dict: # return the data of image 
after convoluting process so we can check it out _A : Optional[int] = np.asmatrix(__lowerCamelCase) _A , _A : Tuple = self.convolute( __lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) _A : Union[str, Any] = self.pooling(__lowerCamelCase , self.size_poolinga) return data_conveda, data_pooleda if __name__ == "__main__": pass
11
0
"""simple docstring""" import faiss # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import requests # noqa: F401 # Here to have a nice missing dependency error message early on import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on from mauve import compute_mauve # From: mauve-text import datasets A__ : List[Any] = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n' A__ : str = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n' A__ : List[Any] = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. 
Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION ) class lowercase__ ( datasets.Metric ): def UpperCAmelCase__ ( self : Optional[Any] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/krishnap25/mauve" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/krishnap25/mauve"] , reference_urls=[ "https://arxiv.org/abs/2102.01454", "https://github.com/krishnap25/mauve", ] , ) def UpperCAmelCase__ ( self : List[str] , snake_case__ : str , snake_case__ : Any , snake_case__ : Union[str, Any]=None , snake_case__ : Optional[int]=None , snake_case__ : Dict=None , snake_case__ : List[Any]=None , snake_case__ : Union[str, Any]="auto" , snake_case__ : Optional[Any]=-1 , snake_case__ : Optional[Any]=0.9 , snake_case__ : str=5 , snake_case__ : str=500 , snake_case__ : List[Any]="gpt2-large" , snake_case__ : Union[str, Any]=-1 , snake_case__ : int=1024 , snake_case__ : Dict=25 , snake_case__ : Union[str, Any]=5 , snake_case__ : Optional[Any]=True , snake_case__ : Optional[Any]=25 , ): lowerCamelCase_ : Optional[int] =compute_mauve( p_text=__lowerCamelCase , q_text=__lowerCamelCase , p_features=__lowerCamelCase , q_features=__lowerCamelCase , p_tokens=__lowerCamelCase , q_tokens=__lowerCamelCase , num_buckets=__lowerCamelCase , pca_max_data=__lowerCamelCase , kmeans_explained_var=__lowerCamelCase , kmeans_num_redo=__lowerCamelCase , kmeans_max_iter=__lowerCamelCase , featurize_model_name=__lowerCamelCase , device_id=__lowerCamelCase , max_text_length=__lowerCamelCase , divergence_curve_discretization_size=__lowerCamelCase , mauve_scaling_factor=__lowerCamelCase , verbose=__lowerCamelCase , seed=__lowerCamelCase , ) return out
import re

from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P


# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if the regexes in qs match a contiguous window of the key tuple ks."""
    # compile the patterns and anchor them so each one must match a whole key segment
    qts = tuple(re.compile(x + "$") for x in qs)
    # slide a window of len(qs) over ks
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def _UpperCAmelCase(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    # start every leaf as "unmatched", then let the rules overwrite it
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
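A minimal usage sketch for the helper above. The toy parameter tree is hypothetical (only the key paths matter to the regex matcher); real code would pass a flax model's params dict.

# Hypothetical parameter tree; in practice this comes from a flax GPT-2/GPT-J style model's params.
toy_params = {
    "transformer": {
        "wte": {"embedding": None},
        "ln_f": {"bias": None, "scale": None},
    }
}
partition_spec = _UpperCAmelCase(toy_params)
# -> FrozenDict mirroring toy_params, with PartitionSpec("mp", None) at transformer/wte/embedding
#    and None at the ln_f leaves.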
def UpperCamelCase__(a: int) -> int:
    """Count the set bits (1s) in the binary representation of a non-negative integer."""
    if a < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(a, float):
        raise TypeError("Input value must be a 'int' type")
    return bin(a).count("1")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
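A quick sanity check for the set-bit counter above; the sample values are arbitrary.

assert UpperCamelCase__(25) == 3  # 25 == 0b11001
assert UpperCamelCase__(36) == 2  # 36 == 0b100100
assert UpperCamelCase__(16) == 1  # 16 == 0b10000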
def _UpperCAmelCase(input_str: str, use_pascal: bool = False) -> str:
    """Convert a snake_case string to camelCase, or to PascalCase when use_pascal is True."""
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)
    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])


if __name__ == "__main__":
    from doctest import testmod

    testmod()
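Two illustrative calls for the converter above (the input strings are arbitrary):

assert _UpperCAmelCase("some_random_string") == "someRandomString"
assert _UpperCAmelCase("some_random_string", use_pascal=True) == "SomeRandomString"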
'''simple docstring''' import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class A_ ( lowerCAmelCase_ ): _lowerCamelCase : Optional[int] = """Speech2TextFeatureExtractor""" _lowerCamelCase : int = """Speech2TextTokenizer""" def __init__( self : Tuple , snake_case_ : List[Any] , snake_case_ : Dict ): super().__init__(__lowerCamelCase , __lowerCamelCase ) _UpperCAmelCase = self.feature_extractor _UpperCAmelCase = False def __call__( self : Union[str, Any] , *snake_case_ : List[Any] , **snake_case_ : List[Any] ): # For backward compatibility if self._in_target_context_manager: return self.current_processor(*__lowerCamelCase , **__lowerCamelCase ) if "raw_speech" in kwargs: warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." ) _UpperCAmelCase = kwargs.pop("raw_speech" ) else: _UpperCAmelCase = kwargs.pop("audio" , __lowerCamelCase ) _UpperCAmelCase = kwargs.pop("sampling_rate" , __lowerCamelCase ) _UpperCAmelCase = kwargs.pop("text" , __lowerCamelCase ) if len(__lowerCamelCase ) > 0: _UpperCAmelCase = args[0] _UpperCAmelCase = args[1:] if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process." ) if audio is not None: _UpperCAmelCase = self.feature_extractor(__lowerCamelCase , *__lowerCamelCase , sampling_rate=__lowerCamelCase , **__lowerCamelCase ) if text is not None: _UpperCAmelCase = self.tokenizer(__lowerCamelCase , **__lowerCamelCase ) if text is None: return inputs elif audio is None: return encodings else: _UpperCAmelCase = encodings["input_ids"] return inputs def lowercase ( self : Optional[int] , *snake_case_ : Any , **snake_case_ : Any ): return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase ) def lowercase ( self : Optional[Any] , *snake_case_ : List[Any] , **snake_case_ : List[Any] ): return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase ) @contextmanager def lowercase ( self : Dict ): warnings.warn( "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your " "labels by using the argument `text` of the regular `__call__` method (either in the same call as " "your audio inputs, or in a separate call." ) _UpperCAmelCase = True _UpperCAmelCase = self.tokenizer yield _UpperCAmelCase = self.feature_extractor _UpperCAmelCase = False
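A rough usage sketch for the processor above. The checkpoint name and the one-second silent waveform are assumptions; the audio, sampling_rate, and text keyword arguments are the ones handled in __call__.

import numpy as np

# Hypothetical checkpoint; any Speech2Text checkpoint that ships this processor would do.
processor = A_.from_pretrained("facebook/s2t-small-librispeech-asr")

speech = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz, purely illustrative
inputs = processor(audio=speech, sampling_rate=16_000, return_tensors="pt")
labels = processor(text="a reference transcription", return_tensors="pt").input_ids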
from __future__ import annotations


def _UpperCAmelCase(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """Greedy fractional knapsack: take items in decreasing value/weight ratio."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
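A worked example for the greedy routine above, using the classic textbook instance (values and weights chosen for illustration):

value = [60, 100, 120]
weight = [10, 20, 30]
max_value, fractions = _UpperCAmelCase(value, weight, capacity=50)
assert max_value == 240.0          # items 0 and 1 taken whole, two thirds of item 2
assert fractions == [1, 1, 2 / 3]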
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..bit import BitConfig __A =logging.get_logger(__name__) __A ={ 'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json', # See all DPT models at https://huggingface.co/models?filter=dpt } class _snake_case ( a__ ): lowerCAmelCase :Optional[int] = '''dpt''' def __init__( self , _lowerCamelCase=768 , _lowerCamelCase=12 , _lowerCamelCase=12 , _lowerCamelCase=3072 , _lowerCamelCase="gelu" , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.02 , _lowerCamelCase=1e-1_2 , _lowerCamelCase=384 , _lowerCamelCase=16 , _lowerCamelCase=3 , _lowerCamelCase=False , _lowerCamelCase=True , _lowerCamelCase=[2, 5, 8, 11] , _lowerCamelCase="project" , _lowerCamelCase=[4, 2, 1, 0.5] , _lowerCamelCase=[96, 192, 384, 768] , _lowerCamelCase=256 , _lowerCamelCase=-1 , _lowerCamelCase=False , _lowerCamelCase=True , _lowerCamelCase=0.4 , _lowerCamelCase=255 , _lowerCamelCase=0.1 , _lowerCamelCase=[1, 1024, 24, 24] , _lowerCamelCase=[0, 1] , _lowerCamelCase=None , **_lowerCamelCase , ): super().__init__(**__lowerCamelCase) UpperCAmelCase__ : Any = hidden_size UpperCAmelCase__ : Dict = is_hybrid if self.is_hybrid: if backbone_config is None: logger.info("""Initializing the config with a `BiT` backbone.""") UpperCAmelCase__ : int = { "global_padding": "same", "layer_type": "bottleneck", "depths": [3, 4, 9], "out_features": ["stage1", "stage2", "stage3"], "embedding_dynamic_padding": True, } UpperCAmelCase__ : Dict = BitConfig(**__lowerCamelCase) elif isinstance(__lowerCamelCase , __lowerCamelCase): logger.info("""Initializing the config with a `BiT` backbone.""") UpperCAmelCase__ : Optional[Any] = BitConfig(**__lowerCamelCase) elif isinstance(__lowerCamelCase , __lowerCamelCase): UpperCAmelCase__ : List[str] = backbone_config else: raise ValueError( f'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''') UpperCAmelCase__ : Any = backbone_featmap_shape UpperCAmelCase__ : Optional[Any] = neck_ignore_stages if readout_type != "project": raise ValueError("""Readout type must be 'project' when using `DPT-hybrid` mode.""") else: UpperCAmelCase__ : List[Any] = None UpperCAmelCase__ : List[str] = None UpperCAmelCase__ : int = [] UpperCAmelCase__ : Any = num_hidden_layers UpperCAmelCase__ : List[str] = num_attention_heads UpperCAmelCase__ : Any = intermediate_size UpperCAmelCase__ : int = hidden_act UpperCAmelCase__ : int = hidden_dropout_prob UpperCAmelCase__ : Tuple = attention_probs_dropout_prob UpperCAmelCase__ : str = initializer_range UpperCAmelCase__ : Optional[Any] = layer_norm_eps UpperCAmelCase__ : Optional[Any] = image_size UpperCAmelCase__ : Any = patch_size UpperCAmelCase__ : List[Any] = num_channels UpperCAmelCase__ : Union[str, Any] = qkv_bias UpperCAmelCase__ : List[Any] = backbone_out_indices if readout_type not in ["ignore", "add", "project"]: raise ValueError("""Readout_type must be one of ['ignore', 'add', 'project']""") UpperCAmelCase__ : int = readout_type UpperCAmelCase__ : Union[str, Any] = reassemble_factors UpperCAmelCase__ : str = neck_hidden_sizes UpperCAmelCase__ : Optional[Any] = fusion_hidden_size UpperCAmelCase__ : Union[str, Any] = head_in_index UpperCAmelCase__ : List[str] = use_batch_norm_in_fusion_residual # auxiliary head attributes (semantic segmentation) UpperCAmelCase__ : str = use_auxiliary_head UpperCAmelCase__ : Union[str, Any] = auxiliary_loss_weight UpperCAmelCase__ : Any = 
semantic_loss_ignore_index UpperCAmelCase__ : Any = semantic_classifier_dropout def snake_case__ ( self): UpperCAmelCase__ : str = copy.deepcopy(self.__dict__) if output["backbone_config"] is not None: UpperCAmelCase__ : Optional[Any] = self.backbone_config.to_dict() UpperCAmelCase__ : Optional[int] = self.__class__.model_type return output
import warnings

from ...utils import logging
from .image_processing_beit import BeitImageProcessor


lowerCAmelCase__ = logging.get_logger(__name__)


class lowerCAmelCase__(BeitImageProcessor):
    '''simple docstring'''

    def __init__(self, *__lowerCamelCase, **__lowerCamelCase) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*__lowerCamelCase, **__lowerCamelCase)
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A : Optional[Any] = logging.get_logger(__name__) __A : Any = { '''facebook/timesformer''': '''https://huggingface.co/facebook/timesformer/resolve/main/config.json''', } class _UpperCAmelCase ( _A ): SCREAMING_SNAKE_CASE_ : str = "timesformer" def __init__( self : List[str] , A : List[Any]=2_24 , A : int=16 , A : int=3 , A : Tuple=8 , A : Optional[int]=7_68 , A : Any=12 , A : Optional[int]=12 , A : int=30_72 , A : Optional[Any]="gelu" , A : Optional[Any]=0.0 , A : Dict=0.0 , A : int=0.02 , A : Tuple=1e-6 , A : Optional[int]=True , A : str="divided_space_time" , A : List[Any]=0 , **A : Any , ) -> Tuple: super().__init__(**__lowerCamelCase ) lowercase_ : Any = image_size lowercase_ : Union[str, Any] = patch_size lowercase_ : Tuple = num_channels lowercase_ : Dict = num_frames lowercase_ : int = hidden_size lowercase_ : Union[str, Any] = num_hidden_layers lowercase_ : List[Any] = num_attention_heads lowercase_ : Optional[int] = intermediate_size lowercase_ : Union[str, Any] = hidden_act lowercase_ : List[Any] = hidden_dropout_prob lowercase_ : Any = attention_probs_dropout_prob lowercase_ : Tuple = initializer_range lowercase_ : Tuple = layer_norm_eps lowercase_ : str = qkv_bias lowercase_ : str = attention_type lowercase_ : Dict = drop_path_rate
import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class lowerCAmelCase__ : '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=3_2 , __lowerCamelCase=2 , __lowerCamelCase=3 , __lowerCamelCase=1_6 , __lowerCamelCase=[1, 2, 1] , __lowerCamelCase=[2, 2, 4] , __lowerCamelCase=2 , __lowerCamelCase=2.0 , __lowerCamelCase=True , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.1 , __lowerCamelCase="gelu" , __lowerCamelCase=False , __lowerCamelCase=True , __lowerCamelCase=0.0_2 , __lowerCamelCase=1e-5 , __lowerCamelCase=True , __lowerCamelCase=None , __lowerCamelCase=True , __lowerCamelCase=1_0 , __lowerCamelCase=8 , __lowerCamelCase=["stage1", "stage2", "stage3"] , __lowerCamelCase=[1, 2, 3] , ) -> Optional[Any]: _A : int = parent _A : Optional[Any] = batch_size _A : str = image_size _A : Tuple = patch_size _A : Tuple = num_channels _A : Optional[int] = embed_dim _A : Dict = depths _A : Any = num_heads _A : Any = window_size _A : int = mlp_ratio _A : Any = qkv_bias _A : Union[str, Any] = hidden_dropout_prob _A : Optional[Any] = attention_probs_dropout_prob _A : Dict = drop_path_rate _A : List[Any] = hidden_act _A : Any = use_absolute_embeddings _A : Optional[int] = patch_norm _A : Tuple = layer_norm_eps _A : List[str] = initializer_range _A : Optional[int] = is_training _A : Optional[Any] = scope _A : Optional[int] = use_labels _A : Dict = type_sequence_label_size _A : str = encoder_stride _A : Optional[int] = out_features _A : Optional[int] = out_indices def _lowerCamelCase ( self) -> Union[str, Any]: _A : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) _A : Optional[Any] = None if self.use_labels: _A : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size) _A : Optional[int] = self.get_config() return config, pixel_values, labels def _lowerCamelCase ( self) -> Union[str, Any]: return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> List[Any]: _A : Dict = MaskFormerSwinModel(config=__lowerCamelCase) model.to(__lowerCamelCase) model.eval() _A : int = model(__lowerCamelCase) _A : Any = 
((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1)) _A : List[str] = int(config.embed_dim * 2 ** (len(config.depths) - 1)) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim)) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Dict: _A : Optional[Any] = MaskFormerSwinBackbone(config=__lowerCamelCase) model.to(__lowerCamelCase) model.eval() _A : Dict = model(__lowerCamelCase) # verify feature maps self.parent.assertEqual(len(result.feature_maps) , len(config.out_features)) self.parent.assertListEqual(list(result.feature_maps[0].shape) , [1_3, 1_6, 1_6, 1_6]) # verify channels self.parent.assertEqual(len(model.channels) , len(config.out_features)) self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4]) # verify ValueError with self.parent.assertRaises(__lowerCamelCase): _A : Union[str, Any] = ["stem"] _A : Union[str, Any] = MaskFormerSwinBackbone(config=__lowerCamelCase) def _lowerCamelCase ( self) -> Dict: _A : Any = self.prepare_config_and_inputs() _A , _A , _A : List[Any] = config_and_inputs _A : Optional[int] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase__ ( a , a , unittest.TestCase): '''simple docstring''' __SCREAMING_SNAKE_CASE = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) __SCREAMING_SNAKE_CASE = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {} __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False def _lowerCamelCase ( self) -> str: _A : Union[str, Any] = MaskFormerSwinModelTester(self) _A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , embed_dim=3_7) @require_torch_multi_gpu @unittest.skip( reason=( "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with" " `nn.DataParallel`" )) def _lowerCamelCase ( self) -> Union[str, Any]: pass def _lowerCamelCase ( self) -> int: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _lowerCamelCase ( self) -> str: return def _lowerCamelCase ( self) -> List[Any]: _A : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase) def _lowerCamelCase ( self) -> Union[str, Any]: _A : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__lowerCamelCase) @unittest.skip("Swin does not use inputs_embeds") def _lowerCamelCase ( self) -> str: pass @unittest.skip("Swin does not support feedforward chunking") def _lowerCamelCase ( self) -> List[Any]: pass def _lowerCamelCase ( self) -> Optional[int]: _A , _A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A : Union[str, Any] = model_class(__lowerCamelCase) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) _A : Dict = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear)) def _lowerCamelCase ( self) -> Any: _A , _A : List[Any] 
= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A : int = model_class(__lowerCamelCase) _A : Optional[int] = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic _A : int = [*signature.parameters.keys()] _A : Optional[int] = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase) @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions") def _lowerCamelCase ( self) -> Tuple: pass @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone") def _lowerCamelCase ( self) -> str: pass def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Optional[int]: _A : Any = model_class(__lowerCamelCase) model.to(__lowerCamelCase) model.eval() with torch.no_grad(): _A : str = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)) _A : Tuple = outputs.hidden_states _A : Any = getattr( self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths) + 1) self.assertEqual(len(__lowerCamelCase) , __lowerCamelCase) # Swin has a different seq_length _A : Optional[int] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable) else (config.patch_size, config.patch_size) ) _A : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , ) def _lowerCamelCase ( self) -> Dict: _A , _A : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _A : Any = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: _A : List[Any] = True self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _A : Optional[int] = True self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) def _lowerCamelCase ( self) -> Tuple: _A , _A : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _A : Optional[int] = 3 _A : Dict = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) _A : Optional[int] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable) else (config.patch_size, config.patch_size) ) _A : int = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) _A : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: _A : List[Any] = True self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , (padded_height, padded_width)) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _A : Union[str, Any] = True self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , (padded_height, padded_width)) @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints") def _lowerCamelCase ( self) -> List[str]: pass @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin") def _lowerCamelCase ( self) -> List[str]: 
pass @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin") def _lowerCamelCase ( self) -> str: pass def _lowerCamelCase ( self) -> Optional[Any]: _A , _A : Any = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(__lowerCamelCase): _A : Optional[int] = 0 return t def check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase={}): with torch.no_grad(): _A : Any = model(**__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase) _A : int = model(**__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase).to_tuple() def recursive_check(__lowerCamelCase , __lowerCamelCase): if isinstance(__lowerCamelCase , (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(__lowerCamelCase , __lowerCamelCase): recursive_check(__lowerCamelCase , __lowerCamelCase) elif isinstance(__lowerCamelCase , __lowerCamelCase): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values()): recursive_check(__lowerCamelCase , __lowerCamelCase) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(__lowerCamelCase) , set_nan_tensor_to_zero(__lowerCamelCase) , atol=1e-5) , msg=( "Tuple and dict output are not equal. Difference:" F" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:" F" {torch.isnan(__lowerCamelCase).any()} and `inf`: {torch.isinf(__lowerCamelCase)}. Dict has" F" `nan`: {torch.isnan(__lowerCamelCase).any()} and `inf`: {torch.isinf(__lowerCamelCase)}." ) , ) recursive_check(__lowerCamelCase , __lowerCamelCase) for model_class in self.all_model_classes: _A : List[Any] = model_class(__lowerCamelCase) model.to(__lowerCamelCase) model.eval() _A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase) _A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase) _A : Any = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase) _A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase) _A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase) _A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True}) _A : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase) _A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True}) @require_torch class lowerCAmelCase__ ( unittest.TestCase , a): '''simple docstring''' __SCREAMING_SNAKE_CASE = (MaskFormerSwinBackbone,) if is_torch_available() else () __SCREAMING_SNAKE_CASE = MaskFormerSwinConfig def _lowerCamelCase ( self) -> Optional[Any]: _A : Tuple = MaskFormerSwinModelTester(self) def _lowerCamelCase ( self) -> Optional[Any]: _A , _A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() _A : Union[str, Any] = inputs_dict["pixel_values"].shape[0] for backbone_class in self.all_model_classes: _A : Optional[Any] = backbone_class(__lowerCamelCase) backbone.to(__lowerCamelCase) backbone.eval() _A : List[Any] = 
backbone(**__lowerCamelCase) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , __lowerCamelCase) self.assertTrue(len(outputs.feature_maps) == len(backbone.channels)) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels)) self.assertIsNone(outputs.hidden_states) self.assertIsNone(outputs.attentions) # Test output_hidden_states=True _A : List[str] = backbone(**__lowerCamelCase , output_hidden_states=__lowerCamelCase) self.assertIsNotNone(outputs.hidden_states) self.assertTrue(len(outputs.hidden_states) , len(backbone.stage_names)) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) _A , _A , _A : List[str] = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels)) # Test output_attentions=True if self.has_attentions: _A : int = backbone(**__lowerCamelCase , output_attentions=__lowerCamelCase) self.assertIsNotNone(outputs.attentions)
"""simple docstring""" # Lint as: python3 import dataclasses import re from dataclasses import dataclass from functools import total_ordering from typing import Optional, Union A : int = re.compile(R"^(?P<major>\d+)" R"\.(?P<minor>\d+)" R"\.(?P<patch>\d+)$") @total_ordering @dataclass class _UpperCamelCase : '''simple docstring''' __UpperCAmelCase : List[Any] =4_2 __UpperCAmelCase : Tuple =None __UpperCAmelCase : Optional[Any] =None __UpperCAmelCase : Any =None __UpperCAmelCase : List[str] =None def snake_case ( self ): __lowerCAmelCase = _str_to_version_tuple(self.version_str ) def __repr__( self ): return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}" @property def snake_case ( self ): return self.major, self.minor, self.patch def snake_case ( self , __a ): if isinstance(__lowerCamelCase , __lowerCamelCase ): return Version(__lowerCamelCase ) elif isinstance(__lowerCamelCase , __lowerCamelCase ): return other raise TypeError(f"{other} (type {type(__lowerCamelCase )}) cannot be compared to version." ) def __eq__( self , __a ): try: __lowerCAmelCase = self._validate_operand(__lowerCamelCase ) except (TypeError, ValueError): return False else: return self.tuple == other.tuple def __lt__( self , __a ): __lowerCAmelCase = self._validate_operand(__lowerCamelCase ) return self.tuple < other.tuple def __hash__( self ): return hash(_version_tuple_to_str(self.tuple ) ) @classmethod def snake_case ( cls , __a ): __lowerCAmelCase = {f.name for f in dataclasses.fields(cls )} return cls(**{k: v for k, v in dic.items() if k in field_names} ) def snake_case ( self ): return self.version_str def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = _VERSION_REG.match(UpperCamelCase__ ) if not res: raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits." ) return tuple(int(UpperCamelCase__ ) for v in [res.group("major" ), res.group("minor" ), res.group("patch" )] ) def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' return ".".join(str(UpperCamelCase__ ) for v in version_tuple )
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available, ) lowerCAmelCase__ = { 'configuration_speecht5': [ 'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP', 'SpeechT5Config', 'SpeechT5HifiGanConfig', ], 'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'], 'processing_speecht5': ['SpeechT5Processor'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['SpeechT5Tokenizer'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ 'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST', 'SpeechT5ForSpeechToText', 'SpeechT5ForSpeechToSpeech', 'SpeechT5ForTextToSpeech', 'SpeechT5Model', 'SpeechT5PreTrainedModel', 'SpeechT5HifiGan', ] if TYPE_CHECKING: from .configuration_speechta import ( SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP, SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, SpeechTaConfig, SpeechTaHifiGanConfig, ) from .feature_extraction_speechta import SpeechTaFeatureExtractor from .processing_speechta import SpeechTaProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speechta import SpeechTaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speechta import ( SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaModel, SpeechTaPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" import json import os from typing import Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging a :List[Any] = logging.get_logger(__name__) a :Optional[Any] = {"vocab_file": "vocab.json"} a :Optional[Any] = { "vocab_file": { "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json", } } a :List[str] = {"mgp-str": 27} class __a (UpperCamelCase_): '''simple docstring''' _SCREAMING_SNAKE_CASE :Tuple = VOCAB_FILES_NAMES _SCREAMING_SNAKE_CASE :Dict = PRETRAINED_VOCAB_FILES_MAP _SCREAMING_SNAKE_CASE :str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , _a , _a="[GO]" , _a="[GO]" , _a="[s]" , _a="[GO]" , **_a ) -> Optional[Any]: """simple docstring""" super().__init__( unk_token=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , pad_token=__lowerCamelCase , **__lowerCamelCase , ) with open(__lowerCamelCase , encoding="""utf-8""" ) as vocab_handle: SCREAMING_SNAKE_CASE__ : Optional[Any] = json.load(__lowerCamelCase ) SCREAMING_SNAKE_CASE__ : Tuple = {v: k for k, v in self.vocab.items()} @property def _a ( self ) -> Union[str, Any]: """simple docstring""" return len(self.vocab ) def _a ( self ) -> int: """simple docstring""" return dict(self.vocab , **self.added_tokens_encoder ) def _a ( self , _a ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = [] for s in text: char_tokens.extend(__lowerCamelCase ) return char_tokens def _a ( self , _a ) -> List[Any]: """simple docstring""" return self.vocab.get(__lowerCamelCase , self.vocab.get(self.unk_token ) ) def _a ( self , _a ) -> Dict: """simple docstring""" return self.decoder.get(__lowerCamelCase ) def _a ( self , _a , _a = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(__lowerCamelCase ): logger.error("""Vocabulary path ({}) should be a directory""".format(__lowerCamelCase ) ) return SCREAMING_SNAKE_CASE__ : Dict = os.path.join( __lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) with open(__lowerCamelCase , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.vocab , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + """\n""" ) return (vocab_file,)
import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def _UpperCAmelCase (UpperCamelCase__ : Union[str, Any] ): _A , _A : Any = image.size _A , _A : str = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 _A : List[str] = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) _A : Any = np.array(UpperCamelCase__ ).astype(np.floataa ) / 2_55.0 _A : Optional[Any] = image[None].transpose(0 , 3 , 1 , 2 ) _A : Union[str, Any] = torch.from_numpy(UpperCamelCase__ ) return 2.0 * image - 1.0 class lowerCAmelCase__ ( a): '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> Optional[int]: super().__init__() self.register_modules(vqvae=__lowerCamelCase , unet=__lowerCamelCase , scheduler=__lowerCamelCase) @torch.no_grad() def __call__( self , __lowerCamelCase = None , __lowerCamelCase = 1 , __lowerCamelCase = 1_0_0 , __lowerCamelCase = 0.0 , __lowerCamelCase = None , __lowerCamelCase = "pil" , __lowerCamelCase = True , ) -> Union[Tuple, ImagePipelineOutput]: if isinstance(__lowerCamelCase , PIL.Image.Image): _A : Tuple = 1 elif isinstance(__lowerCamelCase , torch.Tensor): _A : Union[str, Any] = image.shape[0] else: raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(__lowerCamelCase)}") if isinstance(__lowerCamelCase , PIL.Image.Image): _A : Union[str, Any] = preprocess(__lowerCamelCase) _A , _A : Union[str, Any] = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image _A : Optional[Any] = (batch_size, self.unet.config.in_channels // 2, height, width) _A : str = next(self.unet.parameters()).dtype _A : Union[str, Any] = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=self.device , dtype=__lowerCamelCase) _A : List[Any] = image.to(device=self.device , dtype=__lowerCamelCase) # set timesteps and move to the correct device self.scheduler.set_timesteps(__lowerCamelCase , device=self.device) _A : Any = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler _A : List[str] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] _A : str = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) _A : Optional[int] = {} if accepts_eta: _A : List[Any] = eta for t in self.progress_bar(__lowerCamelCase): # concat latents and low resolution image in the channel dimension. 
_A : List[Any] = torch.cat([latents, image] , dim=1) _A : str = self.scheduler.scale_model_input(__lowerCamelCase , __lowerCamelCase) # predict the noise residual _A : Any = self.unet(__lowerCamelCase , __lowerCamelCase).sample # compute the previous noisy sample x_t -> x_t-1 _A : Optional[int] = self.scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase).prev_sample # decode the image latents with the VQVAE _A : Union[str, Any] = self.vqvae.decode(__lowerCamelCase).sample _A : Dict = torch.clamp(__lowerCamelCase , -1.0 , 1.0) _A : Tuple = image / 2 + 0.5 _A : int = image.cpu().permute(0 , 2 , 3 , 1).numpy() if output_type == "pil": _A : Optional[int] = self.numpy_to_pil(__lowerCamelCase) if not return_dict: return (image,) return ImagePipelineOutput(images=__lowerCamelCase)
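A rough usage sketch for the latent-diffusion super-resolution pipeline defined above. The checkpoint name is an assumption, and the random tensor stands in for a real low-resolution image scaled to [-1, 1]; the call arguments are the ones accepted by __call__.

import torch

pipe = lowerCAmelCase__.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")  # hypothetical checkpoint
pipe.to("cuda" if torch.cuda.is_available() else "cpu")

low_res = torch.rand(1, 3, 128, 128) * 2 - 1  # placeholder for a preprocessed low-resolution image
upscaled = pipe(image=low_res, num_inference_steps=100, eta=1.0).images[0]
upscaled.save("upscaled.png")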
import json import logging import os import socket import git import numpy as np import torch logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) __lowerCAmelCase : List[Any] = logging.getLogger(__name__) def UpperCAmelCase_ ( __lowerCAmelCase ) -> Any: __lowercase : Dict = git.Repo(search_parent_directories=UpperCamelCase__ ) __lowercase : Optional[Any] = { "repo_id": str(UpperCamelCase__ ), "repo_sha": str(repo.head.object.hexsha ), "repo_branch": str(repo.active_branch ), } with open(os.path.join(UpperCamelCase__ , '''git_log.json''' ) , '''w''' ) as f: json.dump(UpperCamelCase__ , UpperCamelCase__ , indent=4 ) def UpperCAmelCase_ ( __lowerCAmelCase ) -> List[Any]: if params.n_gpu <= 0: __lowercase : Union[str, Any] = 0 __lowercase : Dict = -1 __lowercase : Dict = True __lowercase : str = False return assert torch.cuda.is_available() logger.info('''Initializing GPUs''' ) if params.n_gpu > 1: assert params.local_rank != -1 __lowercase : Any = int(os.environ['''WORLD_SIZE'''] ) __lowercase : List[Any] = int(os.environ['''N_GPU_NODE'''] ) __lowercase : Any = int(os.environ['''RANK'''] ) # number of nodes / node ID __lowercase : int = params.world_size // params.n_gpu_per_node __lowercase : str = params.global_rank // params.n_gpu_per_node __lowercase : Any = True assert params.n_nodes == int(os.environ['''N_NODES'''] ) assert params.node_id == int(os.environ['''NODE_RANK'''] ) # local job (single GPU) else: assert params.local_rank == -1 __lowercase : Dict = 1 __lowercase : List[str] = 0 __lowercase : int = 0 __lowercase : List[Any] = 0 __lowercase : str = 1 __lowercase : Union[str, Any] = 1 __lowercase : Any = False # sanity checks assert params.n_nodes >= 1 assert 0 <= params.node_id < params.n_nodes assert 0 <= params.local_rank <= params.global_rank < params.world_size assert params.world_size == params.n_nodes * params.n_gpu_per_node # define whether this is the master process / if we are in multi-node distributed mode __lowercase : str = params.node_id == 0 and params.local_rank == 0 __lowercase : Any = params.n_nodes > 1 # summary __lowercase : Optional[Any] = F'--- Global rank: {params.global_rank} - ' logger.info(PREFIX + '''Number of nodes: %i''' % params.n_nodes ) logger.info(PREFIX + '''Node ID : %i''' % params.node_id ) logger.info(PREFIX + '''Local rank : %i''' % params.local_rank ) logger.info(PREFIX + '''World size : %i''' % params.world_size ) logger.info(PREFIX + '''GPUs per node : %i''' % params.n_gpu_per_node ) logger.info(PREFIX + '''Master : %s''' % str(params.is_master ) ) logger.info(PREFIX + '''Multi-node : %s''' % str(params.multi_node ) ) logger.info(PREFIX + '''Multi-GPU : %s''' % str(params.multi_gpu ) ) logger.info(PREFIX + '''Hostname : %s''' % socket.gethostname() ) # set GPU device torch.cuda.set_device(params.local_rank ) # initialize multi-GPU if params.multi_gpu: logger.info('''Initializing PyTorch distributed''' ) torch.distributed.init_process_group( init_method='''env://''' , backend='''nccl''' , ) def UpperCAmelCase_ ( __lowerCAmelCase ) -> List[Any]: np.random.seed(args.seed ) torch.manual_seed(args.seed ) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed )
import unittest import torch from diffusers import VQModel from diffusers.utils import floats_tensor, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class lowerCAmelCase__ ( a , a , unittest.TestCase): '''simple docstring''' __SCREAMING_SNAKE_CASE = VQModel __SCREAMING_SNAKE_CASE = "sample" @property def _lowerCamelCase ( self , __lowerCamelCase=(3_2, 3_2)) -> Optional[Any]: _A : Optional[int] = 4 _A : Tuple = 3 _A : List[Any] = floats_tensor((batch_size, num_channels) + sizes).to(__lowerCamelCase) return {"sample": image} @property def _lowerCamelCase ( self) -> int: return (3, 3_2, 3_2) @property def _lowerCamelCase ( self) -> List[Any]: return (3, 3_2, 3_2) def _lowerCamelCase ( self) -> Union[str, Any]: _A : List[Any] = { "block_out_channels": [3_2, 6_4], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 3, } _A : int = self.dummy_input return init_dict, inputs_dict def _lowerCamelCase ( self) -> Union[str, Any]: pass def _lowerCamelCase ( self) -> Any: pass def _lowerCamelCase ( self) -> Any: _A , _A : List[Any] = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=__lowerCamelCase) self.assertIsNotNone(__lowerCamelCase) self.assertEqual(len(loading_info["missing_keys"]) , 0) model.to(__lowerCamelCase) _A : str = model(**self.dummy_input) assert image is not None, "Make sure output is not None" def _lowerCamelCase ( self) -> Union[str, Any]: _A : Optional[Any] = VQModel.from_pretrained("fusing/vqgan-dummy") model.to(__lowerCamelCase).eval() torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) _A : Tuple = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size) _A : Optional[int] = image.to(__lowerCamelCase) with torch.no_grad(): _A : List[str] = model(__lowerCamelCase).sample _A : int = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off _A : Optional[Any] = torch.tensor([-0.0_1_5_3, -0.4_0_4_4, -0.1_8_8_0, -0.5_1_6_1, -0.2_4_1_8, -0.4_0_7_2, -0.1_6_1_2, -0.0_6_3_3, -0.0_1_4_3]) # fmt: on self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3))
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Optional[int]: if not isinstance(UpperCamelCase__ , UpperCamelCase__ ): raise ValueError('''Input must be an integer''' ) if input_num <= 0: raise ValueError('''Input must be positive''' ) return sum( divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 ) if __name__ == "__main__": import doctest doctest.testmod()
import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: lowerCAmelCase__ = None lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} lowerCAmelCase__ = { 'vocab_file': { 'facebook/mbart-large-en-ro': ( 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model' ), 'facebook/mbart-large-cc25': ( 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model' ), }, 'tokenizer_file': { 'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json', 'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json', }, } lowerCAmelCase__ = { 'facebook/mbart-large-en-ro': 10_24, 'facebook/mbart-large-cc25': 10_24, } # fmt: off lowerCAmelCase__ = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN'] class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"] __SCREAMING_SNAKE_CASE = MBartTokenizer __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="<s>" , __lowerCamelCase="</s>" , __lowerCamelCase="</s>" , __lowerCamelCase="<s>" , __lowerCamelCase="<unk>" , __lowerCamelCase="<pad>" , __lowerCamelCase="<mask>" , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , **__lowerCamelCase , ) -> Optional[int]: # Mask token behave like a normal word, i.e. include the space before it _A : List[str] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase) else mask_token super().__init__( vocab_file=__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , **__lowerCamelCase , ) _A : Union[str, Any] = vocab_file _A : int = False if not self.vocab_file else True _A : Optional[int] = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens]) self.add_special_tokens({"additional_special_tokens": _additional_special_tokens}) _A : Union[str, Any] = { lang_code: self.convert_tokens_to_ids(__lowerCamelCase) for lang_code in FAIRSEQ_LANGUAGE_CODES } _A : Optional[int] = src_lang if src_lang is not None else "en_XX" _A : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang) _A : int = tgt_lang self.set_src_lang_special_tokens(self._src_lang) @property def _lowerCamelCase ( self) -> str: return self._src_lang @src_lang.setter def _lowerCamelCase ( self , __lowerCamelCase) -> None: _A : Dict = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]: _A : List[str] = [self.sep_token_id] _A : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) -> Dict: if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model") _A : str = src_lang _A : Any = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase) _A : Tuple = self.convert_tokens_to_ids(__lowerCamelCase) _A : Dict = tgt_lang_id return inputs def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = "en_XX" , __lowerCamelCase = None , __lowerCamelCase = "ro_RO" , **__lowerCamelCase , ) -> BatchEncoding: _A : Any = src_lang _A : int = tgt_lang return super().prepare_seqaseq_batch(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self) -> List[str]: return self.set_src_lang_special_tokens(self.src_lang) def _lowerCamelCase ( self) -> List[Any]: return self.set_tgt_lang_special_tokens(self.tgt_lang) def _lowerCamelCase ( self , __lowerCamelCase) -> None: _A : int = self.convert_tokens_to_ids(__lowerCamelCase) _A : int = [] _A : List[str] = [self.eos_token_id, self.cur_lang_code] _A : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens) _A : str = self.convert_ids_to_tokens(self.suffix_tokens) _A : List[Any] = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , ) def _lowerCamelCase ( self , __lowerCamelCase) -> None: _A : Optional[int] = self.convert_tokens_to_ids(__lowerCamelCase) _A : List[Any] = [] _A : str = [self.eos_token_id, self.cur_lang_code] _A : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens) _A : int = self.convert_ids_to_tokens(self.suffix_tokens) _A : str = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , ) def 
_lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer.") if not os.path.isdir(__lowerCamelCase): logger.error(F"Vocabulary path ({save_directory}) should be a directory.") return _A : int = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(__lowerCamelCase): copyfile(self.vocab_file , __lowerCamelCase) return (out_vocab_file,)
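A brief, hedged example of the fast MBart tokenizer defined above; the sentence is arbitrary, and the checkpoint is one of the two listed in the pretrained maps.

tok = lowerCAmelCase__.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
batch = tok("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
# Per set_src_lang_special_tokens above, the encoded ids end with "</s> en_XX" rather than starting with them.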
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging SCREAMING_SNAKE_CASE :List[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE :Optional[Any] = { 'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json', # See all YOLOS models at https://huggingface.co/models?filter=yolos } class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = "yolos" def __init__( self : Optional[Any] ,A : Dict=7_68 ,A : int=12 ,A : Optional[int]=12 ,A : Optional[int]=30_72 ,A : Optional[Any]="gelu" ,A : str=0.0 ,A : List[str]=0.0 ,A : Union[str, Any]=0.02 ,A : Optional[Any]=1E-12 ,A : Dict=[5_12, 8_64] ,A : Dict=16 ,A : List[Any]=3 ,A : List[Any]=True ,A : Any=1_00 ,A : Any=True ,A : Tuple=False ,A : Optional[Any]=1 ,A : List[Any]=5 ,A : Tuple=2 ,A : List[str]=5 ,A : Optional[Any]=2 ,A : Optional[int]=0.1 ,**A : Optional[int] ,): super().__init__(**__lowerCamelCase ) __A = hidden_size __A = num_hidden_layers __A = num_attention_heads __A = intermediate_size __A = hidden_act __A = hidden_dropout_prob __A = attention_probs_dropout_prob __A = initializer_range __A = layer_norm_eps __A = image_size __A = patch_size __A = num_channels __A = qkv_bias __A = num_detection_tokens __A = use_mid_position_embeddings __A = auxiliary_loss # Hungarian matcher __A = class_cost __A = bbox_cost __A = giou_cost # Loss coefficients __A = bbox_loss_coefficient __A = giou_loss_coefficient __A = eos_coefficient class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = version.parse("1.11" ) @property def UpperCamelCase_ ( self : Dict ): return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def UpperCamelCase_ ( self : Dict ): return 1E-4 @property def UpperCamelCase_ ( self : int ): return 12
import json import os from typing import Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { 'vocab_file': 'vocab.json', 'tokenizer_config_file': 'tokenizer_config.json', 'merges_file': 'merges.txt', } lowerCAmelCase__ = { 'vocab_file': { 'facebook/s2t-wav2vec2-large-en-de': ( 'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json' ), }, 'tokenizer_config_file': { 'facebook/s2t-wav2vec2-large-en-de': ( 'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json' ), }, 'merges_file': { 'facebook/s2t-wav2vec2-large-en-de': ( 'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt' ), }, } lowerCAmelCase__ = '</w>' lowerCAmelCase__ = '@@ ' def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] ): _A : Optional[int] = set() _A : Optional[Any] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _A : List[Any] = char return pairs # Speech2Text2 has no max input length lowerCAmelCase__ = {'facebook/s2t-wav2vec2-large-en-de': 10_24} class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"] def __init__( self , __lowerCamelCase , __lowerCamelCase="<s>" , __lowerCamelCase="<pad>" , __lowerCamelCase="</s>" , __lowerCamelCase="<unk>" , __lowerCamelCase=False , __lowerCamelCase=None , **__lowerCamelCase , ) -> Optional[Any]: super().__init__( unk_token=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , pad_token=__lowerCamelCase , do_lower_case=__lowerCamelCase , **__lowerCamelCase , ) _A : Dict = do_lower_case with open(__lowerCamelCase , encoding="utf-8") as vocab_handle: _A : Optional[int] = json.load(__lowerCamelCase) _A : Optional[Any] = {v: k for k, v in self.encoder.items()} if merges_file is None: logger.info(F"No merges files provided. 
{self.__class__.__name__} can only be used for decoding.") _A : Optional[Any] = None _A : Tuple = None else: with open(__lowerCamelCase , encoding="utf-8") as merges_handle: _A : Optional[int] = merges_handle.read().split("\n")[:-1] _A : Union[str, Any] = [tuple(merge.split()[:2]) for merge in merges] _A : Optional[int] = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase)))) _A : List[Any] = {} @property def _lowerCamelCase ( self) -> int: return len(self.decoder) def _lowerCamelCase ( self) -> Dict: return dict(self.encoder , **self.added_tokens_encoder) def _lowerCamelCase ( self , __lowerCamelCase) -> Dict: _A : Tuple = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,) if token in self.cache: return self.cache[token] _A : int = get_pairs(__lowerCamelCase) if not pairs: return token while True: _A : Any = min(__lowerCamelCase , key=lambda __lowerCamelCase: self.bpe_ranks.get(__lowerCamelCase , float("inf"))) if bigram not in self.bpe_ranks: break _A , _A : Optional[int] = bigram _A : int = [] _A : str = 0 while i < len(__lowerCamelCase): try: _A : str = word.index(__lowerCamelCase , __lowerCamelCase) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) _A : str = j if word[i] == first and i < len(__lowerCamelCase) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 _A : List[str] = tuple(__lowerCamelCase) _A : List[str] = new_word if len(__lowerCamelCase) == 1: break else: _A : List[Any] = get_pairs(__lowerCamelCase) _A : Tuple = " ".join(__lowerCamelCase) if word == "\n " + BPE_TOKEN_MERGES: _A : List[str] = "\n" + BPE_TOKEN_MERGES if word.endswith(__lowerCamelCase): _A : int = word.replace(__lowerCamelCase , "") _A : int = word.replace(" " , __lowerCamelCase) _A : Union[str, Any] = word return word def _lowerCamelCase ( self , __lowerCamelCase) -> Optional[Any]: if self.bpe_ranks is None: raise ValueError( "This tokenizer was instantiated without a `merges.txt` file, so" " that it can only be used for decoding, not for encoding." 
"Make sure to provide `merges.txt` file at instantiation to enable " "encoding.") if self.do_lower_case: _A : List[Any] = text.lower() _A : Optional[int] = text.split() _A : List[str] = [] for token in text: if token: split_tokens.extend(list(self.bpe(__lowerCamelCase).split(" "))) return split_tokens def _lowerCamelCase ( self , __lowerCamelCase) -> int: return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token)) def _lowerCamelCase ( self , __lowerCamelCase) -> str: _A : List[str] = self.decoder.get(__lowerCamelCase , self.unk_token) return result def _lowerCamelCase ( self , __lowerCamelCase) -> str: _A : str = " ".join(__lowerCamelCase) # make sure @@ tokens are concatenated _A : int = "".join(string.split(__lowerCamelCase)) return string def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]: if not os.path.isdir(__lowerCamelCase): logger.error(F"Vocabulary path ({save_directory}) should be a directory") return _A : Any = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) _A : Any = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]) with open(__lowerCamelCase , "w" , encoding="utf-8") as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase) + "\n") _A : Union[str, Any] = 0 if self.bpe_ranks is None: return (vocab_file,) with open(__lowerCamelCase , "w" , encoding="utf-8") as writer: for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase: kv[1]): if index != token_index: logger.warning( F"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!") _A : Optional[int] = token_index writer.write(" ".join(__lowerCamelCase) + "\n") index += 1 return (vocab_file, merges_file)
11
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase__ = { """configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""], """tokenization_electra""": ["""ElectraTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ["""ElectraTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""", """ElectraForCausalLM""", """ElectraForMaskedLM""", """ElectraForMultipleChoice""", """ElectraForPreTraining""", """ElectraForQuestionAnswering""", """ElectraForSequenceClassification""", """ElectraForTokenClassification""", """ElectraModel""", """ElectraPreTrainedModel""", """load_tf_weights_in_electra""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFElectraForMaskedLM""", """TFElectraForMultipleChoice""", """TFElectraForPreTraining""", """TFElectraForQuestionAnswering""", """TFElectraForSequenceClassification""", """TFElectraForTokenClassification""", """TFElectraModel""", """TFElectraPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """FlaxElectraForCausalLM""", """FlaxElectraForMaskedLM""", """FlaxElectraForMultipleChoice""", """FlaxElectraForPreTraining""", """FlaxElectraForQuestionAnswering""", """FlaxElectraForSequenceClassification""", """FlaxElectraForTokenClassification""", """FlaxElectraModel""", """FlaxElectraPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, 
FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
302
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
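# Minimal usage sketch, assuming `ViTMAEConfig` is importable from an installed `transformers`
# package. `mask_ratio` is the fraction of patches hidden from the encoder during pre-training.
from transformers import ViTMAEConfig

config = ViTMAEConfig(mask_ratio=0.6)  # defaults mirror facebook/vit-mae-base

print(config.patch_size)  # 16
print(config.mask_ratio)  # 0.6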
11
0
"""simple docstring""" from math import factorial, radians def _snake_case ( lowerCamelCase__ : float , lowerCamelCase__ : int = 18 , lowerCamelCase__ : int = 10 ) -> Optional[int]: lowerCamelCase_ : str =angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0) # Converting from degrees to radians lowerCamelCase_ : List[Any] =radians(UpperCamelCase__ ) lowerCamelCase_ : str =angle_in_radians lowerCamelCase_ : List[str] =3 lowerCamelCase_ : Tuple =-1 for _ in range(UpperCamelCase__ ): result += (b * (angle_in_radians**a)) / factorial(UpperCamelCase__ ) lowerCamelCase_ : str =-b # One positive term and the next will be negative and so on... a += 2 # Increased by 2 for every term. return round(UpperCamelCase__ , UpperCamelCase__ ) if __name__ == "__main__": __import__('doctest').testmod()
144
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
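# Brief illustration of what the lazy module buys: submodules are only imported when one of
# their attributes is first accessed. The import path below assumes the usual location of this
# package inside an installed `transformers` distribution and is meant as a sketch, not part of
# this file.
import importlib

lazy_pkg = importlib.import_module("transformers.models.speech_encoder_decoder")
print(lazy_pkg.SpeechEncoderDecoderConfig.model_type)  # "speech-encoder-decoder"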
11
0
import heapq import sys import numpy as np a__: Optional[int] = tuple[int, int] class SCREAMING_SNAKE_CASE__ : def __init__( self ): A__ = [] A__ = set() def UpperCamelCase ( self ): if not self.empty(): return self.elements[0][0] else: return float('''inf''' ) def UpperCamelCase ( self ): return len(self.elements ) == 0 def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase ): if item not in self.set: heapq.heappush(self.elements,(priority, item) ) self.set.add(__lowerCamelCase ) else: # update # print("update", item) A__ = [] (A__) = heapq.heappop(self.elements ) while x != item: temp.append((pri, x) ) (A__) = heapq.heappop(self.elements ) temp.append((priority, item) ) for pro, xxx in temp: heapq.heappush(self.elements,(pro, xxx) ) def UpperCamelCase ( self,__lowerCamelCase ): if item in self.set: self.set.remove(__lowerCamelCase ) A__ = [] (A__) = heapq.heappop(self.elements ) while x != item: temp.append((pro, x) ) (A__) = heapq.heappop(self.elements ) for prito, yyy in temp: heapq.heappush(self.elements,(prito, yyy) ) def UpperCamelCase ( self ): return self.elements[0][1] def UpperCamelCase ( self ): (A__) = heapq.heappop(self.elements ) self.set.remove(__lowerCamelCase ) return (priority, item) def UpperCamelCase__( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos )->Optional[int]: # euclidean distance A__ = np.array(UpperCamelCase__ ) A__ = np.array(UpperCamelCase__ ) return np.linalg.norm(a - b ) def UpperCamelCase__( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos )->List[Any]: # integer division by time variable return consistent_heuristic(UpperCamelCase__ , UpperCamelCase__ ) // t def UpperCamelCase__( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos )->Tuple: # manhattan distance return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] ) def UpperCamelCase__( UpperCamelCase__ : TPos , UpperCamelCase__ : int , UpperCamelCase__ : TPos , UpperCamelCase__ : dict[TPos, float] )->Optional[Any]: A__ = g_function[start] + Wa * heuristics[i](UpperCamelCase__ , UpperCamelCase__ ) return ans def UpperCamelCase__( UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str )->int: A__ = np.chararray((n, n) ) for i in range(UpperCamelCase__ ): for j in range(UpperCamelCase__ ): A__ = "*" for i in range(UpperCamelCase__ ): for j in range(UpperCamelCase__ ): if (j, (n - 1) - i) in blocks: A__ = "#" A__ = "-" A__ = back_pointer[goal] while x != start: (A__) = x # print(x) A__ = "-" A__ = back_pointer[x] A__ = "-" for i in range(UpperCamelCase__ ): for j in range(UpperCamelCase__ ): if (i, j) == (0, n - 1): print(grid[i][j] , end=''' ''' ) print('''<-- End position''' , end=''' ''' ) else: print(grid[i][j] , end=''' ''' ) print() print('''^''' ) print('''Start position''' ) print() print('''# is an obstacle''' ) print('''- is the path taken by algorithm''' ) print('''PATH TAKEN BY THE ALGORITHM IS:-''' ) A__ = back_pointer[goal] while x != start: print(UpperCamelCase__ , end=''' ''' ) A__ = back_pointer[x] print(UpperCamelCase__ ) sys.exit() def UpperCamelCase__( UpperCamelCase__ : TPos )->Union[str, Any]: if p[0] < 0 or p[0] > n - 1: return False if p[1] < 0 or p[1] > n - 1: return False return True def UpperCamelCase__( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Any , )->str: for itera in range(UpperCamelCase__ ): open_list[itera].remove_element(UpperCamelCase__ ) # print("s", 
s) # print("j", j) (A__) = s A__ = (x - 1, y) A__ = (x + 1, y) A__ = (x, y + 1) A__ = (x, y - 1) for neighbours in [left, right, up, down]: if neighbours not in blocks: if valid(UpperCamelCase__ ) and neighbours not in visited: # print("neighbour", neighbours) visited.add(UpperCamelCase__ ) A__ = -1 A__ = float('''inf''' ) if valid(UpperCamelCase__ ) and g_function[neighbours] > g_function[s] + 1: A__ = g_function[s] + 1 A__ = s if neighbours not in close_list_anchor: open_list[0].put(UpperCamelCase__ , key(UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ ) ) if neighbours not in close_list_inad: for var in range(1 , UpperCamelCase__ ): if key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) <= Wa * key( UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ ): open_list[j].put( UpperCamelCase__ , key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ) def UpperCamelCase__( )->List[Any]: A__ = [] for x in range(1 , 5 ): for y in range(1 , 6 ): some_list.append((x, y) ) for x in range(15 , 20 ): some_list.append((x, 17) ) for x in range(10 , 19 ): for y in range(1 , 15 ): some_list.append((x, y) ) # L block for x in range(1 , 4 ): for y in range(12 , 19 ): some_list.append((x, y) ) for x in range(3 , 13 ): for y in range(16 , 19 ): some_list.append((x, y) ) return some_list a__: List[str] = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a} a__: Tuple = [ (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1), ] a__: Any = make_common_ground() a__: List[str] = blocks_blk # hyper parameters a__: Optional[int] = 1 a__: Tuple = 1 a__: Dict = 20 a__: Optional[int] = 3 # one consistent and two other inconsistent # start and end destination a__: str = (0, 0) a__: List[str] = (n - 1, n - 1) a__: Tuple = 1 def UpperCamelCase__( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos , UpperCamelCase__ : int )->List[str]: A__ = {start: 0, goal: float('''inf''' )} A__ = {start: -1, goal: -1} A__ = [] A__ = set() for i in range(UpperCamelCase__ ): open_list.append(PriorityQueue() ) open_list[i].put(UpperCamelCase__ , key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ) A__ = [] A__ = [] while open_list[0].minkey() < float('''inf''' ): for i in range(1 , UpperCamelCase__ ): # print(open_list[0].minkey(), open_list[i].minkey()) if open_list[i].minkey() <= Wa * open_list[0].minkey(): global t t += 1 if g_function[goal] <= open_list[i].minkey(): if g_function[goal] < float('''inf''' ): do_something(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: A__ = open_list[i].top_show() visited.add(UpperCamelCase__ ) expand_state( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) close_list_inad.append(UpperCamelCase__ ) else: if g_function[goal] <= open_list[0].minkey(): if g_function[goal] < float('''inf''' ): do_something(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: A__ = open_list[0].top_show() visited.add(UpperCamelCase__ ) expand_state( UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) close_list_anchor.append(UpperCamelCase__ ) print('''No path found to goal''' ) print() for i in range(n - 1 , -1 , -1 ): for j in range(UpperCamelCase__ ): if (j, i) in blocks: print('''#''' , 
end=''' ''' ) elif (j, i) in back_pointer: if (j, i) == (n - 1, n - 1): print('''*''' , end=''' ''' ) else: print('''-''' , end=''' ''' ) else: print('''*''' , end=''' ''' ) if (j, i) == (n - 1, n - 1): print('''<-- End position''' , end=''' ''' ) print() print('''^''' ) print('''Start position''' ) print() print('''# is an obstacle''' ) print('''- is the path taken by algorithm''' ) if __name__ == "__main__": multi_a_star(start, goal, n_heuristic)
193
# HF Trainer benchmarking tool # # This tool can be used to run and compare multiple dimensions of the HF Trainers args. # # It then prints a report once in github format with all the information that needs to be shared # with others and second time in a console-friendly format, so it's easier to use for tuning things up. # # The main idea is: # # ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \ # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \ # --target-metric-key train_samples_per_second # # The variations can be any command line argument that you want to compare and not just dtype as in # the example. # # --variations allows you to compare variations in multiple dimensions. # # as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6 # times adding one of: # # 1. --tf32 0 --fp16 0 # 2. --tf32 0 --fp16 1 # 3. --tf32 0 --bf16 1 # 4. --tf32 1 --fp16 0 # 5. --tf32 1 --fp16 1 # 6. --tf32 1 --bf16 1 # # and print the results. This is just a cartesian product - and more than 2 dimensions can be used. # # If you want to rely on defaults, this: # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' # is identical to this: # --variations '--tf32 0|--tf32 1' '|--fp16|--bf16' # # the leading empty variation in the 2nd dimension is a valid variation. # # So here we get the following 6 variations: # # 1. --tf32 0 # 2. --tf32 0 --fp16 # 3. --tf32 0 --bf16 # 4. --tf32 1 # 5. --tf32 1 --fp16 # 6. --tf32 1 --bf16 # # In this particular case we don't know what the default tf32 setting is as it's normally # pytorch-version dependent). That's why it's best to do an explicit setting of each variation: # `--tf32 0|--tf32 1` # # Here is a full example of a train: # # CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \ # --base-cmd \ # ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \ # --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \ # --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \ # --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \ # --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \ # --source_prefix "translate English to Romanian: " --warmup_steps 50 \ # --max_train_samples 20000 --dataloader_num_workers 2 ' \ # --target-metric-key train_samples_per_second --repeat-times 1 --variations \ # '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \ # --repeat-times 1 --base-variation '--tf32 0' # # and here is a possible output: # # # | Variation | Train | Diff | Train | # | | samples | % | loss | # | | per | | | # | | second | | | # |:----------------|----------:|-------:|--------:| # | --tf32 0 | 285.11 | 0 | 2.51 | # | --tf32 1 | 342.09 | 20 | 2.51 | # | --fp16 --tf32 0 | 423.49 | 49 | 2.51 | # | --fp16 --tf32 1 | 423.13 | 48 | 2.51 | # | --bf16 --tf32 0 | 416.80 | 46 | 2.52 | # | --bf16 --tf32 1 | 415.87 | 46 | 2.52 | # # # So you can quickly compare the different outcomes. # # Typically running each experiment once is enough, but if the environment is unstable you can # re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results. 
# # By default it'll use the lowest result as the base line to use as 100% and then compare the rest to # it as can be seen from the table above, but you can also specify which combination is the one to use as # the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0' # # --target-metric-key is there to tell the program which metrics to compare - the different metric keys are # inside output_dir/all_results.json. e.g., to measure eval performance instead of train use: # --target-metric-key eval_samples_per_second # but of course you will need to adjust the --base-cmd value in the example to perform evaluation as # well (as currently it doesn't) # import argparse import datetime import io import itertools import json import math import os import platform import re import shlex import subprocess import sys from pathlib import Path from statistics import fmean import pandas as pd import torch from tqdm import tqdm import transformers lowerCAmelCase__ = float('nan') class lowerCAmelCase__ : '''simple docstring''' def __init__( self , __lowerCamelCase) -> Optional[Any]: _A : List[Any] = sys.stdout _A : str = open(__lowerCamelCase , "a") def __getattr__( self , __lowerCamelCase) -> List[str]: return getattr(self.stdout , __lowerCamelCase) def _lowerCamelCase ( self , __lowerCamelCase) -> str: self.stdout.write(__lowerCamelCase) # strip tqdm codes self.file.write(re.sub(r"^.*\r" , "" , __lowerCamelCase , 0 , re.M)) def _UpperCAmelCase (UpperCamelCase__ : str=80 , UpperCamelCase__ : Tuple=False ): _A : Tuple = [] # deal with critical env vars _A : Dict = ["CUDA_VISIBLE_DEVICES"] for key in env_keys: _A : Optional[int] = os.environ.get(UpperCamelCase__ , UpperCamelCase__ ) if val is not None: cmd.append(f"{key}={val}" ) # python executable (not always needed if the script is executable) _A : Optional[int] = sys.executable if full_python_path else sys.executable.split("/" )[-1] cmd.append(UpperCamelCase__ ) # now the normal args cmd += list(map(shlex.quote , sys.argv ) ) # split up into up to MAX_WIDTH lines with shell multi-line escapes _A : Tuple = [] _A : Dict = "" while len(UpperCamelCase__ ) > 0: current_line += f"{cmd.pop(0 )} " if len(UpperCamelCase__ ) == 0 or len(UpperCamelCase__ ) + len(cmd[0] ) + 1 > max_width - 1: lines.append(UpperCamelCase__ ) _A : Union[str, Any] = "" return "\\\n".join(UpperCamelCase__ ) def _UpperCAmelCase (UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple ): # unwrap multi-line input _A : Union[str, Any] = re.sub(r"[\\\n]+" , " " , args.base_cmd ) # remove --output_dir if any and set our own _A : int = re.sub("--output_dir\s+[^\s]+" , "" , args.base_cmd ) args.base_cmd += f" --output_dir {output_dir}" # ensure we have --overwrite_output_dir _A : int = re.sub("--overwrite_output_dir\s+" , "" , args.base_cmd ) args.base_cmd += " --overwrite_output_dir" return [sys.executable] + shlex.split(args.base_cmd ) def _UpperCAmelCase (UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] ): # Enable to debug everything but the run itself, to do it fast and see the progress. 
# This is useful for debugging the output formatting quickly - we can remove it later once # everybody is happy with the output if 0: import random from time import sleep sleep(0 ) return dict( {k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 1_00.2, 55.66_66, 2_22.22_22_22_22] )} , ) _A : Dict = subprocess.run(UpperCamelCase__ , capture_output=UpperCamelCase__ , text=UpperCamelCase__ ) if verbose: print("STDOUT" , result.stdout ) print("STDERR" , result.stderr ) # save the streams _A : Tuple = variation.replace(" " , "-" ) with open(Path(UpperCamelCase__ ) / f"log.{prefix}.stdout.txt" , "w" ) as f: f.write(result.stdout ) with open(Path(UpperCamelCase__ ) / f"log.{prefix}.stderr.txt" , "w" ) as f: f.write(result.stderr ) if result.returncode != 0: if verbose: print("failed" ) return {target_metric_key: nan} with io.open(f"{output_dir}/all_results.json" , "r" , encoding="utf-8" ) as f: _A : List[str] = json.load(UpperCamelCase__ ) # filter out just the keys we want return {k: v for k, v in metrics.items() if k in metric_keys} def _UpperCAmelCase (UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Any , ): _A : Union[str, Any] = [] _A : Optional[int] = [] _A : Any = f"{id}: {variation:<{longest_variation_len}}" _A : Dict = f"{preamble}: " _A : Union[str, Any] = set(report_metric_keys + [target_metric_key] ) for i in tqdm(range(UpperCamelCase__ ) , desc=UpperCamelCase__ , leave=UpperCamelCase__ ): _A : Optional[Any] = process_run_single( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) _A : Optional[Any] = single_run_metrics[target_metric_key] if not math.isnan(UpperCamelCase__ ): metrics.append(UpperCamelCase__ ) results.append(UpperCamelCase__ ) outcome += "✓" else: outcome += "✘" _A : str = f"\33[2K\r{outcome}" if len(UpperCamelCase__ ) > 0: _A : List[str] = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()} _A : Any = round(mean_metrics[target_metric_key] , 2 ) _A : Tuple = f"{outcome} {mean_target}" if len(UpperCamelCase__ ) > 1: results_str += f" {tuple(round(UpperCamelCase__ , 2 ) for x in results )}" print(UpperCamelCase__ ) _A : Optional[int] = variation return mean_metrics else: print(UpperCamelCase__ ) return {variation_key: variation, target_metric_key: nan} def _UpperCAmelCase (): _A : int = torch.cuda.get_device_properties(torch.device("cuda" ) ) return f"\nDatetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n" def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict ): _A : Any = pd.DataFrame(UpperCamelCase__ ) _A : List[str] = "variation" _A : List[Any] = "diff_%" _A : int = nan if base_variation is not None and len(df[df[variation_key] == base_variation] ): # this may still return nan _A : int = df.loc[df[variation_key] == base_variation][target_metric_key].item() if math.isnan(UpperCamelCase__ ): # as a fallback, use the minimal value as the 
sentinel _A : List[str] = df.loc[df[target_metric_key] != nan][target_metric_key].min() # create diff column if possible if not math.isnan(UpperCamelCase__ ): _A : Optional[Any] = df.apply( lambda UpperCamelCase__ : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value ) if not math.isnan(r[target_metric_key] ) else 0 , axis="columns" , ) # re-order columns _A : Union[str, Any] = [variation_key, target_metric_key, diff_key, *report_metric_keys] _A : Any = df.reindex(UpperCamelCase__ , axis="columns" ) # reorder cols # capitalize _A : Tuple = df.rename(str.capitalize , axis="columns" ) # make the cols as narrow as possible _A : List[str] = df.rename(lambda UpperCamelCase__ : c.replace("_" , "<br>" ) , axis="columns" ) _A : Union[str, Any] = df.rename(lambda UpperCamelCase__ : c.replace("_" , "\n" ) , axis="columns" ) _A : Optional[int] = ["", "Copy between the cut-here-lines and paste as is to github or a forum"] report += ["----------8<-----------------8<--------"] report += ["*** Results:", df_github.to_markdown(index=UpperCamelCase__ , floatfmt=".2f" )] report += ["```"] report += ["*** Setup:", get_versions()] report += ["*** The benchmark command line was:", get_original_command()] report += ["```"] report += ["----------8<-----------------8<--------"] report += ["*** Results (console):", df_console.to_markdown(index=UpperCamelCase__ , floatfmt=".2f" )] print("\n\n".join(UpperCamelCase__ ) ) def _UpperCAmelCase (): _A : int = argparse.ArgumentParser() parser.add_argument( "--base-cmd" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="Base cmd" , ) parser.add_argument( "--variations" , default=UpperCamelCase__ , type=UpperCamelCase__ , nargs="+" , required=UpperCamelCase__ , help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'" , ) parser.add_argument( "--base-variation" , default=UpperCamelCase__ , type=UpperCamelCase__ , help="Baseline variation to compare to. if None the minimal target value will be used to compare against" , ) parser.add_argument( "--target-metric-key" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second" , ) parser.add_argument( "--report-metric-keys" , default="" , type=UpperCamelCase__ , help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. 
Use a single argument e.g., 'train_loss train_samples" , ) parser.add_argument( "--repeat-times" , default=1 , type=UpperCamelCase__ , help="How many times to re-run each variation - an average will be reported" , ) parser.add_argument( "--output_dir" , default="output_benchmark" , type=UpperCamelCase__ , help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked" , ) parser.add_argument( "--verbose" , default=UpperCamelCase__ , action="store_true" , help="Whether to show the outputs of each run or just the benchmark progress" , ) _A : int = parser.parse_args() _A : Union[str, Any] = args.output_dir Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ ) _A : Tuple = get_base_command(UpperCamelCase__ , UpperCamelCase__ ) # split each dimension into its --foo variations _A : Dict = [list(map(str.strip , re.split(r"\|" , UpperCamelCase__ ) ) ) for x in args.variations] # build a cartesian product of dimensions and convert those back into cmd-line arg strings, # while stripping white space for inputs that were empty _A : Union[str, Any] = list(map(str.strip , map(" ".join , itertools.product(*UpperCamelCase__ ) ) ) ) _A : Union[str, Any] = max(len(UpperCamelCase__ ) for x in variations ) # split wanted keys _A : str = args.report_metric_keys.split() # capture prints into a log file for convenience _A : Optional[int] = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S' )}.txt" print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt" ) print(f"and this script's output is also piped into {report_fn}" ) _A : Tuple = Tee(UpperCamelCase__ ) print(f"\n*** Running {len(UpperCamelCase__ )} benchmarks:" ) print(f"Base command: {' '.join(UpperCamelCase__ )}" ) _A : str = "variation" _A : Union[str, Any] = [] for id, variation in enumerate(tqdm(UpperCamelCase__ , desc="Total completion: " , leave=UpperCamelCase__ ) ): _A : Dict = base_cmd + variation.split() results.append( process_run( id + 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , args.target_metric_key , UpperCamelCase__ , args.repeat_times , UpperCamelCase__ , args.verbose , ) ) process_results(UpperCamelCase__ , args.target_metric_key , UpperCamelCase__ , args.base_variation , UpperCamelCase__ ) if __name__ == "__main__": main()
11
0
from itertools import product


def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Count how often each total occurs when rolling `dice_number` dice with `sides_number` sides."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    """Probability that Peter (nine 4-sided dice) rolls a higher total than Colin (six 6-sided dice)."""
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability


if __name__ == "__main__":
    print(f"{solution() = }")
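# Quick sanity check of the helper on a familiar case, assuming the name
# `total_frequency_distribution` used above: two 6-sided dice give the classic
# triangular distribution of totals 2..12.
freqs = total_frequency_distribution(sides_number=6, dice_number=2)
print(freqs[2:])   # [1, 2, 3, 4, 5, 6, 5, 4, 3, 2, 1]
print(sum(freqs))  # 36 == 6**2 equally likely outcomes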
22
import logging import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional, Union import datasets import numpy as np import torch from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.trainer_utils import get_last_checkpoint from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.31.0') lowerCAmelCase__ = logging.getLogger(__name__) @dataclass class lowerCAmelCase__ : '''simple docstring''' __SCREAMING_SNAKE_CASE = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "Pretrained config name or path if not the same as model_name"}) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , ) __SCREAMING_SNAKE_CASE = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) } , ) @dataclass class lowerCAmelCase__ : '''simple docstring''' __SCREAMING_SNAKE_CASE = field(default=a , metadata={"help": "The input training data file (a text file)."}) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "Overwrite the cached training and evaluation sets"}) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "The number of processes to use for the preprocessing."} , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={ "help": ( "The maximum total input sequence length after tokenization. If passed, sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={ "help": ( "Whether to pad all samples to the maximum sentence length. " "If False, will pad the samples dynamically when batching to the maximum length in the batch. More " "efficient on GPU but very bad for TPU." ) } , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) } , ) def _lowerCamelCase ( self) -> int: if self.train_file is not None: _A : Optional[int] = self.train_file.split(".")[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." 
if self.validation_file is not None: _A : Dict = self.validation_file.split(".")[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." @dataclass class lowerCAmelCase__ : '''simple docstring''' __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None def __call__( self , __lowerCamelCase) -> str: _A : List[Any] = "label" if "label" in features[0].keys() else "labels" _A : Any = [feature.pop(__lowerCamelCase) for feature in features] _A : Optional[int] = len(__lowerCamelCase) _A : int = len(features[0]["input_ids"]) _A : Tuple = [ [{k: v[i] for k, v in feature.items()} for i in range(__lowerCamelCase)] for feature in features ] _A : str = list(chain(*__lowerCamelCase)) _A : Tuple = self.tokenizer.pad( __lowerCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , ) # Un-flatten _A : Optional[int] = {k: v.view(__lowerCamelCase , __lowerCamelCase , -1) for k, v in batch.items()} # Add back labels _A : Optional[int] = torch.tensor(__lowerCamelCase , dtype=torch.intaa) return batch def _UpperCAmelCase (): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _A : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _A , _A , _A : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _A , _A , _A : Union[str, Any] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_swag" , UpperCamelCase__ , UpperCamelCase__ ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _A : int = training_args.get_process_log_level() logger.setLevel(UpperCamelCase__ ) datasets.utils.logging.set_verbosity(UpperCamelCase__ ) transformers.utils.logging.set_verbosity(UpperCamelCase__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" ) logger.info(f"Training/evaluation parameters {training_args}" ) # Detecting last checkpoint. _A : List[Any] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _A : Optional[int] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." 
) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.train_file is not None or data_args.validation_file is not None: _A : List[str] = {} if data_args.train_file is not None: _A : Optional[int] = data_args.train_file if data_args.validation_file is not None: _A : Tuple = data_args.validation_file _A : Union[str, Any] = data_args.train_file.split("." )[-1] _A : List[str] = load_dataset( UpperCamelCase__ , data_files=UpperCamelCase__ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) else: # Downloading and loading the swag dataset from the hub. _A : Union[str, Any] = load_dataset( "swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _A : Optional[Any] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) _A : Optional[Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) _A : List[Any] = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # When using your own dataset or a different dataset from swag, you will probably need to change this. _A : str = [f"ending{i}" for i in range(4 )] _A : Union[str, Any] = "sent1" _A : str = "sent2" if data_args.max_seq_length is None: _A : Any = tokenizer.model_max_length if max_seq_length > 1024: logger.warning( "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value" " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can" " override this default with `--block_size xxx`." 
) _A : Optional[Any] = 1024 else: if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the" f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." ) _A : int = min(data_args.max_seq_length , tokenizer.model_max_length ) # Preprocessing the datasets. def preprocess_function(UpperCamelCase__ : List[Any] ): _A : List[Any] = [[context] * 4 for context in examples[context_name]] _A : Any = examples[question_header_name] _A : Union[str, Any] = [ [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(UpperCamelCase__ ) ] # Flatten out _A : Dict = list(chain(*UpperCamelCase__ ) ) _A : List[Any] = list(chain(*UpperCamelCase__ ) ) # Tokenize _A : str = tokenizer( UpperCamelCase__ , UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding="max_length" if data_args.pad_to_max_length else False , ) # Un-flatten return {k: [v[i : i + 4] for i in range(0 , len(UpperCamelCase__ ) , 4 )] for k, v in tokenized_examples.items()} if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset" ) _A : Optional[int] = raw_datasets["train"] if data_args.max_train_samples is not None: _A : Union[str, Any] = min(len(UpperCamelCase__ ) , data_args.max_train_samples ) _A : Any = train_dataset.select(range(UpperCamelCase__ ) ) with training_args.main_process_first(desc="train dataset map pre-processing" ): _A : Optional[int] = train_dataset.map( UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset" ) _A : Optional[int] = raw_datasets["validation"] if data_args.max_eval_samples is not None: _A : str = min(len(UpperCamelCase__ ) , data_args.max_eval_samples ) _A : Dict = eval_dataset.select(range(UpperCamelCase__ ) ) with training_args.main_process_first(desc="validation dataset map pre-processing" ): _A : List[str] = eval_dataset.map( UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) # Data collator _A : str = ( default_data_collator if data_args.pad_to_max_length else DataCollatorForMultipleChoice(tokenizer=UpperCamelCase__ , pad_to_multiple_of=8 if training_args.fpaa else None ) ) # Metric def compute_metrics(UpperCamelCase__ : Tuple ): _A , _A : List[str] = eval_predictions _A : Optional[int] = np.argmax(UpperCamelCase__ , axis=1 ) return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()} # Initialize our Trainer _A : List[str] = Trainer( model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=UpperCamelCase__ , data_collator=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , ) # Training if training_args.do_train: _A : Any = None if training_args.resume_from_checkpoint is not None: _A : Optional[int] = training_args.resume_from_checkpoint elif last_checkpoint is not None: _A : int = last_checkpoint _A : Any = trainer.train(resume_from_checkpoint=UpperCamelCase__ ) trainer.save_model() # Saves the tokenizer too for easy upload _A : Optional[int] = train_result.metrics _A : Tuple = ( 
data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCamelCase__ ) ) _A : Tuple = min(UpperCamelCase__ , len(UpperCamelCase__ ) ) trainer.log_metrics("train" , UpperCamelCase__ ) trainer.save_metrics("train" , UpperCamelCase__ ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***" ) _A : List[Any] = trainer.evaluate() _A : int = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(UpperCamelCase__ ) _A : Optional[Any] = min(UpperCamelCase__ , len(UpperCamelCase__ ) ) trainer.log_metrics("eval" , UpperCamelCase__ ) trainer.save_metrics("eval" , UpperCamelCase__ ) _A : Tuple = { "finetuned_from": model_args.model_name_or_path, "tasks": "multiple-choice", "dataset_tags": "swag", "dataset_args": "regular", "dataset": "SWAG", "language": "en", } if training_args.push_to_hub: trainer.push_to_hub(**UpperCamelCase__ ) else: trainer.create_model_card(**UpperCamelCase__ ) def _UpperCAmelCase (UpperCamelCase__ : Optional[int] ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
11
0
'''simple docstring''' import collections import json import os import re from typing import TYPE_CHECKING, List, Optional, Tuple import numpy as np from ...tokenization_utils_fast import PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __A =logging.get_logger(__name__) __A ={'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'} __A ={ 'vocab_file': { 'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt', }, 'emoji_file': { 'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json', }, } __A ={ 'abeja/gpt-neox-japanese-2.7b': 20_48, } def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): with open(UpperCamelCase__ , """r""" , encoding="""utf-8""" ) as f: UpperCAmelCase__ : Tuple = json.loads(f.read() ) UpperCAmelCase__ : str = collections.OrderedDict() UpperCAmelCase__ : Union[str, Any] = collections.OrderedDict() UpperCAmelCase__ : Union[str, Any] = collections.OrderedDict() with open(UpperCamelCase__ , """r""" , encoding="""utf-8""" ) as f: UpperCAmelCase__ : Any = f.readlines() UpperCAmelCase__ : List[Any] = [[t.rstrip("""\n""" )] if (t == "," or "," not in t) else t.rstrip("""\n""" ).split(""",""" ) for t in token] for idx, b in enumerate(UpperCamelCase__ ): UpperCAmelCase__ : Optional[Any] = b UpperCAmelCase__ : List[str] = idx for wd in b: UpperCAmelCase__ : Union[str, Any] = idx return vocab, raw_vocab, ids_to_tokens, emoji class _snake_case ( a__ ): lowerCAmelCase :int = VOCAB_FILES_NAMES lowerCAmelCase :Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase :Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase :Optional[int] = ['''input_ids''', '''attention_mask'''] def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase="<|endoftext|>" , _lowerCamelCase="<|endoftext|>" , _lowerCamelCase="<|startoftext|>" , _lowerCamelCase="<|endoftext|>" , _lowerCamelCase=False , **_lowerCamelCase , ): super().__init__( unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , do_clean_text=__lowerCamelCase , **__lowerCamelCase , ) if not os.path.isfile(__lowerCamelCase): raise ValueError( f'''Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained''' """ model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""") if not os.path.isfile(__lowerCamelCase): raise ValueError( f'''Can\'t find a emoji file at path \'{emoji_file}\'. 
To load the emoji information from a Google''' """ pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""") UpperCAmelCase__ : Optional[int] = do_clean_text UpperCAmelCase__ : Any = load_vocab_and_emoji(__lowerCamelCase , __lowerCamelCase) UpperCAmelCase__ : Union[str, Any] = SubWordJapaneseTokenizer( vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji) @property def snake_case__ ( self): # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab return len(self.raw_vocab) def snake_case__ ( self): return dict(self.raw_vocab , **self.added_tokens_encoder) def snake_case__ ( self , _lowerCamelCase): return self.subword_tokenizer.tokenize(__lowerCamelCase , clean=self.do_clean_text) def snake_case__ ( self , _lowerCamelCase): return self.vocab.get(__lowerCamelCase , self.vocab.get(self.unk_token)) def snake_case__ ( self , _lowerCamelCase): return self.subword_tokenizer.convert_id_to_token(__lowerCamelCase) def snake_case__ ( self , _lowerCamelCase): UpperCAmelCase__ : List[str] = "".join(__lowerCamelCase).strip() return out_string def snake_case__ ( self , _lowerCamelCase): UpperCAmelCase__ : Optional[Any] = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase) + [self.eos_token_id]) if len(__lowerCamelCase) > self.model_max_length: UpperCAmelCase__ : List[Any] = input_ids[-self.model_max_length :] return input_ids def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase = None): UpperCAmelCase__ : Optional[Any] = 0 if os.path.isdir(__lowerCamelCase): UpperCAmelCase__ : Optional[int] = os.path.join( __lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""]) UpperCAmelCase__ : List[str] = os.path.join( __lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""emoji_file"""]) else: UpperCAmelCase__ : Union[str, Any] = ( (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"] ) UpperCAmelCase__ : Dict = ( (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"] ) with open(__lowerCamelCase , """w""" , encoding="""utf-8""") as writer: for token_index, token in self.ids_to_tokens.items(): if index != token_index: logger.warning( f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.''' """ Please check that the vocabulary is not corrupted!""") UpperCAmelCase__ : List[Any] = token_index writer.write(""",""".join(__lowerCamelCase) + """\n""") index += 1 with open(__lowerCamelCase , """w""" , encoding="""utf-8""") as writer: json.dump(self.emoji , __lowerCamelCase) return vocab_file, emoji_file class _snake_case ( a__ ): def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase): UpperCAmelCase__ : Optional[int] = vocab # same as swe UpperCAmelCase__ : Optional[Any] = ids_to_tokens # same as bpe UpperCAmelCase__ : Tuple = emoji UpperCAmelCase__ : Dict = np.max([len(__lowerCamelCase) for w in self.vocab.keys()]) UpperCAmelCase__ : int = re.compile(r"""(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)""") UpperCAmelCase__ : int = re.compile(r"""[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*""") UpperCAmelCase__ : str = re.compile(r"""[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}""") UpperCAmelCase__ : Any = re.compile( 
r"""([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""") UpperCAmelCase__ : List[Any] = re.compile( r"""(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""") UpperCAmelCase__ : Optional[Any] = re.compile( r"""((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*""") UpperCAmelCase__ : Tuple = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿" UpperCAmelCase__ : Dict = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟" UpperCAmelCase__ : str = str.maketrans({k: """<BLOCK>""" for k in keisen + blocks}) def __len__( self): return len(self.ids_to_tokens) def snake_case__ ( self , _lowerCamelCase): UpperCAmelCase__ : Optional[Any] = self.content_repattera.sub("""<URL>""" , __lowerCamelCase) UpperCAmelCase__ : Dict = self.content_repattera.sub("""<EMAIL>""" , __lowerCamelCase) UpperCAmelCase__ : Optional[Any] = self.content_repattera.sub("""<TEL>""" , __lowerCamelCase) UpperCAmelCase__ : Optional[int] = self.content_repattera.sub("""<DATE>""" , __lowerCamelCase) UpperCAmelCase__ : List[str] = self.content_repattera.sub("""<DATE>""" , __lowerCamelCase) UpperCAmelCase__ : List[str] = self.content_repattera.sub("""<PRICE>""" , __lowerCamelCase) UpperCAmelCase__ : int = content.translate(self.content_transa) while "<BLOCK><BLOCK>" in content: UpperCAmelCase__ : List[Any] = content.replace("""<BLOCK><BLOCK>""" , """<BLOCK>""") return content def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase=False): UpperCAmelCase__ : Tuple = text.replace(""" """ , """<SP>""") UpperCAmelCase__ : Tuple = text.replace(""" """ , """<SP>""") UpperCAmelCase__ : List[Any] = text.replace("""\r\n""" , """<BR>""") UpperCAmelCase__ : Any = text.replace("""\n""" , """<BR>""") UpperCAmelCase__ : Optional[Any] = text.replace("""\r""" , """<BR>""") UpperCAmelCase__ : str = text.replace("""\t""" , """<TAB>""") UpperCAmelCase__ : List[Any] = text.replace("""—""" , """ー""") UpperCAmelCase__ : List[Any] = text.replace("""−""" , """ー""") for k, v in self.emoji["emoji"].items(): if k in text: UpperCAmelCase__ : Tuple = text.replace(__lowerCamelCase , __lowerCamelCase) if clean: UpperCAmelCase__ : Union[str, Any] = self.clean_text(__lowerCamelCase) def check_simbol(_lowerCamelCase): UpperCAmelCase__ : Optional[int] = x.encode() if len(__lowerCamelCase) == 1 and len(__lowerCamelCase) == 2: UpperCAmelCase__ : int = (int(e[0]) << 8) + int(e[1]) if ( (c >= 0Xc_2_a_1 and c <= 0Xc_2_b_f) or (c >= 0Xc_7_8_0 and c <= 0Xc_7_8_3) or (c >= 0Xc_a_b_9 and c <= 0Xc_b_b_f) or (c >= 0Xc_c_8_0 and c <= 0Xc_d_a_2) ): return True return False def checkuae(_lowerCamelCase): UpperCAmelCase__ : Optional[Any] = x.encode() if len(__lowerCamelCase) == 1 and len(__lowerCamelCase) == 3: UpperCAmelCase__ : Any = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2]) if c >= 0Xe_2_8_0_8_0 and c <= 0Xe_2_b_0_7_f: return True return False UpperCAmelCase__ : Tuple = 0 UpperCAmelCase__ : str = [] while pos < len(__lowerCamelCase): UpperCAmelCase__ : List[str] = min(len(__lowerCamelCase) , pos + self.maxlen + 1) if text[pos] == "<" else pos + 3 UpperCAmelCase__ : Tuple = [] # (token_id, token, pos) for e in 
range(__lowerCamelCase , __lowerCamelCase , -1): UpperCAmelCase__ : Tuple = text[pos:e] if wd in self.vocab: if wd[0] == "<" and len(__lowerCamelCase) > 2: UpperCAmelCase__ : str = [(self.vocab[wd], wd, e)] break else: candidates.append((self.vocab[wd], wd, e)) if len(__lowerCamelCase) > 0: # the smallest token_id is adopted UpperCAmelCase__ : Tuple = sorted(__lowerCamelCase , key=lambda _lowerCamelCase: x[0])[0] result.append(__lowerCamelCase) UpperCAmelCase__ : Any = e else: UpperCAmelCase__ : Union[str, Any] = pos + 1 UpperCAmelCase__ : Any = text[pos:end] if check_simbol(__lowerCamelCase): result.append("""<KIGOU>""") elif checkuae(__lowerCamelCase): result.append("""<U2000U2BFF>""") else: for i in wd.encode("""utf-8"""): result.append("""<|byte%d|>""" % i) UpperCAmelCase__ : Tuple = end return result def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase="\n"): UpperCAmelCase__ : Union[str, Any] = [] UpperCAmelCase__ : int = [] UpperCAmelCase__ : List[str] = self.ids_to_tokens[index][0] if word[:6] == "<|byte" and word[-2:] == "|>": byte_tokens.append(int(word[6:-2])) else: if len(__lowerCamelCase) > 0: words.append(bytearray(__lowerCamelCase).decode("""utf-8""" , errors="""replace""")) UpperCAmelCase__ : str = [] if word[:7] == "<|emoji" and word[-2:] == "|>": words.append(self.emoji["""emoji_inv"""][word]) elif word == "<SP>": words.append(""" """) elif word == "<BR>": words.append(__lowerCamelCase) elif word == "<TAB>": words.append("""\t""") elif word == "<BLOCK>": words.append("""▀""") elif word == "<KIGOU>": words.append("""ǀ""") elif word == "<U2000U2BFF>": words.append("""‖""") else: words.append(__lowerCamelCase) if len(__lowerCamelCase) > 0: words.append(bytearray(__lowerCamelCase).decode("""utf-8""" , errors="""replace""")) UpperCAmelCase__ : Optional[Any] = "".join(__lowerCamelCase) return text
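# The tokenize loop above works left to right: at each position it tries vocabulary entries from
# the longest window down, collects candidates, and falls back to "<|byte%d|>" tokens when nothing
# matches. A minimal, self-contained sketch of that greedy longest-match idea follows; the toy
# vocabulary and the "longest wins" tie-break are simplifications (the tokenizer above additionally
# prefers the candidate with the smallest token id).
def greedy_longest_match(text, vocab, max_len=4):
    pos, tokens = 0, []
    while pos < len(text):
        match = None
        for end in range(min(len(text), pos + max_len), pos, -1):  # longest window first
            piece = text[pos:end]
            if piece in vocab:
                match = piece
                break
        if match is None:
            match = text[pos]  # per-character fallback instead of byte tokens
        tokens.append(match)
        pos += len(match)
    return tokens


assert greedy_longest_match("abcd", {"ab", "abc", "d"}) == ["abc", "d"]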
163
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" , "False")) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , ) @pytest.mark.usefixtures("sm_env") @parameterized_class( [ { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6}, }, { "framework": "pytorch", "script": "run_ddp.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6}, }, { "framework": "tensorflow", "script": "run_tf_dist.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7}, }, ]) class lowerCAmelCase__ ( unittest.TestCase): '''simple docstring''' def _lowerCamelCase ( self) -> str: if self.framework == "pytorch": subprocess.run( F"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding="utf-8" , check=__lowerCamelCase , ) assert hasattr(self , "env") def _lowerCamelCase ( self , __lowerCamelCase) -> Tuple: _A : Dict = F"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}" # distributed data settings _A : Optional[Any] = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=__lowerCamelCase , instance_count=__lowerCamelCase , instance_type=self.instance_type , debugger_hook_config=__lowerCamelCase , hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=__lowerCamelCase , py_version="py36" , ) def _lowerCamelCase ( self , __lowerCamelCase) -> Optional[Any]: TrainingJobAnalytics(__lowerCamelCase).export_csv(F"{self.env.test_path}/{job_name}_metrics.csv") @parameterized.expand([(2,)]) def _lowerCamelCase ( self , __lowerCamelCase) -> Any: # create estimator _A : Union[str, Any] = self.create_estimator(__lowerCamelCase) # run training estimator.fit() # result dataframe _A : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe() # extract kpis _A : List[Any] = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"]) _A : Dict = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"]) # get train time from SageMaker job, this includes starting, preprocessing, stopping _A : Optional[Any] = ( Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds" , 9_9_9_9_9_9) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy) assert all(t <= self.results["eval_loss"] for t in eval_loss) # dump tests result into json file to share in PR with open(F"{estimator.latest_training_job.name}.json" , "w") as 
outfile: json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , __lowerCamelCase)
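# Outside the test harness, the estimator built by create_estimator above boils down to the sketch
# below. The IAM role, source_dir and framework versions are placeholders/assumptions to be
# replaced with values valid for your AWS account and the deep learning container you use.
from sagemaker.huggingface import HuggingFace

estimator = HuggingFace(
    entry_point="run_glue.py",
    source_dir="./scripts",  # assumed local folder containing the training script
    role="arn:aws:iam::123456789012:role/SageMakerRole",  # placeholder role ARN
    instance_type="ml.p3.16xlarge",
    instance_count=2,
    transformers_version="4.26",  # assumption: pick versions supported by your image
    pytorch_version="1.13",
    py_version="py39",
    distribution={"smdistributed": {"dataparallel": {"enabled": True}}},
    hyperparameters={"model_name_or_path": "distilbert-base-cased"},
)
estimator.fit()  # blocks until the SageMaker training job finishes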
11
0
"""simple docstring""" import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin __A : Dict = '''\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n''' class _UpperCAmelCase ( unittest.TestCase , _A ): def A ( self : Tuple ) -> Union[str, Any]: lowercase_ : List[str] = load_tool('''text-question-answering''' ) self.tool.setup() lowercase_ : List[Any] = load_tool('''text-question-answering''' , remote=__lowerCamelCase ) def A ( self : int ) -> int: lowercase_ : Union[str, Any] = self.tool(__lowerCamelCase , '''What did Hugging Face do in April 2021?''' ) self.assertEqual(__lowerCamelCase , '''launched the BigScience Research Workshop''' ) def A ( self : List[str] ) -> Dict: lowercase_ : List[Any] = self.remote_tool(__lowerCamelCase , '''What did Hugging Face do in April 2021?''' ) self.assertEqual(__lowerCamelCase , '''launched the BigScience Research Workshop''' ) def A ( self : str ) -> str: lowercase_ : int = self.tool(text=__lowerCamelCase , question='''What did Hugging Face do in April 2021?''' ) self.assertEqual(__lowerCamelCase , '''launched the BigScience Research Workshop''' ) def A ( self : List[str] ) -> List[Any]: lowercase_ : Union[str, Any] = self.remote_tool(text=__lowerCamelCase , question='''What did Hugging Face do in April 2021?''' ) self.assertEqual(__lowerCamelCase , '''launched the BigScience Research Workshop''' )
33
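# Plain usage of the text-question-answering agent tool exercised by the test above (same tool
# name and question as the test; the passage is any text you want to query):
from transformers import load_tool

qa_tool = load_tool("text-question-answering")
qa_tool.setup()
answer = qa_tool(
    text="Hugging Face launched the BigScience Research Workshop in April 2021.",
    question="What did Hugging Face do in April 2021?",
)
print(answer)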
import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = ["image_processor", "tokenizer"] __SCREAMING_SNAKE_CASE = "OwlViTImageProcessor" __SCREAMING_SNAKE_CASE = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , **__lowerCamelCase) -> Union[str, Any]: _A : int = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , __lowerCamelCase , ) _A : List[Any] = kwargs.pop("feature_extractor") _A : Dict = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`.") if tokenizer is None: raise ValueError("You need to specify a `tokenizer`.") super().__init__(__lowerCamelCase , __lowerCamelCase) def __call__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="max_length" , __lowerCamelCase="np" , **__lowerCamelCase) -> Any: if text is None and query_images is None and images is None: raise ValueError( "You have to specify at least one text or query image or image. All three cannot be none.") if text is not None: if isinstance(__lowerCamelCase , __lowerCamelCase) or (isinstance(__lowerCamelCase , __lowerCamelCase) and not isinstance(text[0] , __lowerCamelCase)): _A : Union[str, Any] = [self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)] elif isinstance(__lowerCamelCase , __lowerCamelCase) and isinstance(text[0] , __lowerCamelCase): _A : Optional[Any] = [] # Maximum number of queries across batch _A : str = max([len(__lowerCamelCase) for t in text]) # Pad all batch samples to max number of text queries for t in text: if len(__lowerCamelCase) != max_num_queries: _A : Optional[int] = t + [" "] * (max_num_queries - len(__lowerCamelCase)) _A : List[Any] = self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase) encodings.append(__lowerCamelCase) else: raise TypeError("Input text should be a string, a list of strings or a nested list of strings") if return_tensors == "np": _A : Tuple = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0) _A : Optional[Any] = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp _A : Optional[int] = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0) _A : Optional[int] = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0) elif return_tensors == "pt" and is_torch_available(): import torch _A : Optional[Any] = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0) _A : Union[str, Any] = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf _A : Any = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0) _A : Tuple = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0) else: raise ValueError("Target return tensor type could not be returned") _A : Optional[Any] = BatchEncoding() _A 
: Tuple = input_ids _A : Dict = attention_mask if query_images is not None: _A : Optional[Any] = BatchEncoding() _A : List[str] = self.image_processor( __lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase).pixel_values _A : Union[str, Any] = query_pixel_values if images is not None: _A : int = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase) if text is not None and images is not None: _A : Tuple = image_features.pixel_values return encoding elif query_images is not None and images is not None: _A : int = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**__lowerCamelCase) , tensor_type=__lowerCamelCase) def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> str: return self.image_processor.post_process(*__lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> List[str]: return self.image_processor.post_process_object_detection(*__lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> Optional[int]: return self.image_processor.post_process_image_guided_detection(*__lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> int: return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> Optional[int]: return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase) @property def _lowerCamelCase ( self) -> int: warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __lowerCamelCase , ) return self.image_processor_class @property def _lowerCamelCase ( self) -> List[str]: warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __lowerCamelCase , ) return self.image_processor
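# Typical call into the processor defined above: pair per-image text queries with pixel values for
# zero-shot object detection. The checkpoint name is the public OWL-ViT base model; the blank
# image is only there to keep the snippet self-contained.
from PIL import Image
from transformers import OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
image = Image.new("RGB", (640, 480))                 # placeholder image
texts = [["a photo of a cat", "a photo of a dog"]]   # one list of queries per image
inputs = processor(text=texts, images=image, return_tensors="pt")
print(sorted(inputs.keys()))   # ['attention_mask', 'input_ids', 'pixel_values']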
11
0
"""simple docstring""" import re from flax.core.frozen_dict import freeze from flax.traverse_util import flatten_dict, unflatten_dict from jax.experimental import PartitionSpec as P # Sentinels A : int = object() # For specifying empty leaf dict `{}` A : Dict = object() def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = tuple((re.compile(x + "$" ) for x in qs) ) for i in range(len(UpperCamelCase__ ) - len(UpperCamelCase__ ) + 1 ): __lowerCAmelCase = [x.match(UpperCamelCase__ ) for x, y in zip(UpperCamelCase__ , ks[i:] )] if matches and all(UpperCamelCase__ ): return True return False def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' def replace(_UpperCamelCase , _UpperCamelCase ): for rule, replacement in rules: if _match(UpperCamelCase__ , UpperCamelCase__ ): return replacement return val return replace def _lowerCamelCase ( ): '''simple docstring''' return [ # embeddings (("transformer", "wpe", "embedding"), P("mp" , UpperCamelCase__ )), (("transformer", "wte", "embedding"), P("mp" , UpperCamelCase__ )), # atention (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(UpperCamelCase__ , "mp" )), (("attention", "out_proj", "kernel"), P("mp" , UpperCamelCase__ )), (("attention", "out_proj", "bias"), None), # mlp (("mlp", "c_fc", "kernel"), P(UpperCamelCase__ , "mp" )), (("mlp", "c_fc", "bias"), P("mp" )), (("mlp", "c_proj", "kernel"), P("mp" , UpperCamelCase__ )), (("mlp", "c_proj", "bias"), None), # layer norms ((r"ln_\d+", "bias"), None), ((r"\d+", r"ln_\d+", "scale"), None), (("ln_f", "bias"), None), (("ln_f", "scale"), None), ] def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = _get_partition_rules() __lowerCAmelCase = _replacement_rules(UpperCamelCase__ ) __lowerCAmelCase = {k: _unmatched for k in flatten_dict(UpperCamelCase__ )} __lowerCAmelCase = {k: replace(UpperCamelCase__ , UpperCamelCase__ ) for k, v in initd.items()} assert _unmatched not in result.values(), "Incomplete partition spec." return freeze(unflatten_dict(UpperCamelCase__ ) )
57
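# The partition-rule helpers above map each parameter path of a flattened pytree to a
# PartitionSpec by matching a sequence of regexes against a window of that path. A self-contained
# sketch of the lookup with a toy rule set (the rules here are illustrative, not the GPT-style
# rules used above):
import re

def window_match(patterns, key_path):
    # True if the regex sequence matches some contiguous window of the key path
    compiled = [re.compile(p + "$") for p in patterns]
    for i in range(len(key_path) - len(compiled) + 1):
        window = key_path[i : i + len(compiled)]
        if all(c.match(k) for c, k in zip(compiled, window)):
            return True
    return False

RULES = [(("attention", "out_proj", "kernel"), "shard"), ((r"ln_\d+", "bias"), "replicate")]

def spec_for(key_path):
    for pattern, spec in RULES:
        if window_match(pattern, key_path):
            return spec
    return "unmatched"

print(spec_for(("transformer", "h_0", "attention", "out_proj", "kernel")))  # shard
print(spec_for(("transformer", "h_3", "ln_1", "bias")))                     # replicate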
import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class lowerCAmelCase__ ( unittest.TestCase): '''simple docstring''' def _lowerCamelCase ( self , __lowerCamelCase) -> Dict: for model_result in results.values(): for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"]): _A : Optional[int] = model_result["result"][batch_size][sequence_length] self.assertIsNotNone(__lowerCamelCase) def _lowerCamelCase ( self) -> int: _A : Optional[int] = "sshleifer/tiny-gpt2" _A : int = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : List[str] = PyTorchBenchmark(__lowerCamelCase) _A : Optional[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCamelCase ( self) -> Dict: _A : int = "sgugger/tiny-distilbert-classification" _A : str = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , only_pretrain_model=__lowerCamelCase , ) _A : Dict = PyTorchBenchmark(__lowerCamelCase) _A : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCamelCase ( self) -> Optional[Any]: _A : Tuple = "sshleifer/tiny-gpt2" _A : Optional[Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , torchscript=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : Union[str, Any] = PyTorchBenchmark(__lowerCamelCase) _A : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) @unittest.skipIf(torch_device == "cpu" , "Cant do half precision") def _lowerCamelCase ( self) -> int: _A : Any = "sshleifer/tiny-gpt2" _A : Tuple = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , fpaa=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : Any = PyTorchBenchmark(__lowerCamelCase) _A : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCamelCase ( self) -> Any: _A : Union[str, Any] = "sshleifer/tiny-gpt2" _A : Any = AutoConfig.from_pretrained(__lowerCamelCase) # set architectures equal to `None` _A : Dict = None _A : Any = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : Union[str, Any] = PyTorchBenchmark(__lowerCamelCase , configs=[config]) _A : int = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCamelCase ( self) -> int: _A : List[Any] = "sshleifer/tiny-gpt2" _A : int = PyTorchBenchmarkArguments( models=[MODEL_ID] , 
training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : Optional[Any] = PyTorchBenchmark(__lowerCamelCase) _A : int = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) @unittest.skipIf(torch_device == "cpu" , "Can't do half precision") def _lowerCamelCase ( self) -> Optional[Any]: _A : Any = "sshleifer/tiny-gpt2" _A : Tuple = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=__lowerCamelCase , multi_process=__lowerCamelCase , ) _A : List[Any] = PyTorchBenchmark(__lowerCamelCase) _A : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def _lowerCamelCase ( self) -> str: _A : List[str] = "sshleifer/tiny-gpt2" _A : Union[str, Any] = AutoConfig.from_pretrained(__lowerCamelCase) _A : Any = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : Optional[Any] = PyTorchBenchmark(__lowerCamelCase , configs=[config]) _A : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCamelCase ( self) -> int: _A : Tuple = "sshleifer/tinier_bart" _A : Optional[Any] = AutoConfig.from_pretrained(__lowerCamelCase) _A : Optional[int] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : Dict = PyTorchBenchmark(__lowerCamelCase , configs=[config]) _A : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCamelCase ( self) -> str: _A : List[Any] = "sshleifer/tiny-gpt2" _A : Optional[Any] = AutoConfig.from_pretrained(__lowerCamelCase) _A : Optional[Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : List[str] = PyTorchBenchmark(__lowerCamelCase , configs=[config]) _A : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def _lowerCamelCase ( self) -> int: _A : int = "sshleifer/tinier_bart" _A : str = AutoConfig.from_pretrained(__lowerCamelCase) _A : Tuple = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : Tuple = PyTorchBenchmark(__lowerCamelCase , configs=[config]) _A : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def _lowerCamelCase ( self) -> Dict: _A : List[str] = "sshleifer/tiny-gpt2" with tempfile.TemporaryDirectory() as tmp_dir: _A : Optional[Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , save_to_csv=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__lowerCamelCase , 
"inf_time.csv") , train_memory_csv_file=os.path.join(__lowerCamelCase , "train_mem.csv") , inference_memory_csv_file=os.path.join(__lowerCamelCase , "inf_mem.csv") , train_time_csv_file=os.path.join(__lowerCamelCase , "train_time.csv") , env_info_csv_file=os.path.join(__lowerCamelCase , "env.csv") , multi_process=__lowerCamelCase , ) _A : Tuple = PyTorchBenchmark(__lowerCamelCase) benchmark.run() self.assertTrue(Path(os.path.join(__lowerCamelCase , "inf_time.csv")).exists()) self.assertTrue(Path(os.path.join(__lowerCamelCase , "train_time.csv")).exists()) self.assertTrue(Path(os.path.join(__lowerCamelCase , "inf_mem.csv")).exists()) self.assertTrue(Path(os.path.join(__lowerCamelCase , "train_mem.csv")).exists()) self.assertTrue(Path(os.path.join(__lowerCamelCase , "env.csv")).exists()) def _lowerCamelCase ( self) -> int: _A : Dict = "sshleifer/tiny-gpt2" def _check_summary_is_not_empty(__lowerCamelCase): self.assertTrue(hasattr(__lowerCamelCase , "sequential")) self.assertTrue(hasattr(__lowerCamelCase , "cumulative")) self.assertTrue(hasattr(__lowerCamelCase , "current")) self.assertTrue(hasattr(__lowerCamelCase , "total")) with tempfile.TemporaryDirectory() as tmp_dir: _A : Union[str, Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__lowerCamelCase , "log.txt") , log_print=__lowerCamelCase , trace_memory_line_by_line=__lowerCamelCase , multi_process=__lowerCamelCase , ) _A : Optional[int] = PyTorchBenchmark(__lowerCamelCase) _A : Dict = benchmark.run() _check_summary_is_not_empty(result.inference_summary) _check_summary_is_not_empty(result.train_summary) self.assertTrue(Path(os.path.join(__lowerCamelCase , "log.txt")).exists())
11
0
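# The benchmark tests above all follow one recipe; run outside the test suite it is simply the
# following (note these benchmark utilities are deprecated in recent transformers releases):
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],   # same tiny checkpoint the tests use
    inference=True,
    training=False,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = PyTorchBenchmark(args).run()
print(results.time_inference_result)
print(results.memory_inference_result)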
"""simple docstring""" import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class __a (UpperCamelCase_): '''simple docstring''' _SCREAMING_SNAKE_CASE :int = (EulerDiscreteScheduler,) _SCREAMING_SNAKE_CASE :Optional[int] = 10 def _a ( self , **_a ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = { "num_train_timesteps": 1_100, "beta_start": 0.0_001, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**__lowerCamelCase ) return config def _a ( self ) -> Dict: """simple docstring""" for timesteps in [10, 50, 100, 1_000]: self.check_over_configs(num_train_timesteps=__lowerCamelCase ) def _a ( self ) -> Optional[int]: """simple docstring""" for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ): self.check_over_configs(beta_start=__lowerCamelCase , beta_end=__lowerCamelCase ) def _a ( self ) -> Optional[int]: """simple docstring""" for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=__lowerCamelCase ) def _a ( self ) -> Optional[int]: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__lowerCamelCase ) def _a ( self ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = self.scheduler_classes[0] SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_scheduler_config() SCREAMING_SNAKE_CASE__ : Optional[int] = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps ) SCREAMING_SNAKE_CASE__ : List[str] = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Any = self.dummy_model() SCREAMING_SNAKE_CASE__ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma SCREAMING_SNAKE_CASE__ : Optional[int] = sample.to(__lowerCamelCase ) for i, t in enumerate(scheduler.timesteps ): SCREAMING_SNAKE_CASE__ : List[str] = scheduler.scale_model_input(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE__ : Optional[int] = model(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE__ : Optional[Any] = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , generator=__lowerCamelCase ) SCREAMING_SNAKE_CASE__ : List[str] = output.prev_sample SCREAMING_SNAKE_CASE__ : Dict = torch.sum(torch.abs(__lowerCamelCase ) ) SCREAMING_SNAKE_CASE__ : Any = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 10.0_807 ) < 1E-2 assert abs(result_mean.item() - 0.0_131 ) < 1E-3 def _a ( self ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE__ : Any = self.get_scheduler_config(prediction_type="""v_prediction""" ) SCREAMING_SNAKE_CASE__ : Optional[int] = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps ) SCREAMING_SNAKE_CASE__ : str = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Tuple = self.dummy_model() SCREAMING_SNAKE_CASE__ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma SCREAMING_SNAKE_CASE__ : int = sample.to(__lowerCamelCase ) for i, t in enumerate(scheduler.timesteps ): SCREAMING_SNAKE_CASE__ : int = scheduler.scale_model_input(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE__ : Tuple = model(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE__ : Any = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , generator=__lowerCamelCase ) SCREAMING_SNAKE_CASE__ : Tuple = output.prev_sample SCREAMING_SNAKE_CASE__ : 
Optional[int] = torch.sum(torch.abs(__lowerCamelCase ) ) SCREAMING_SNAKE_CASE__ : List[Any] = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 0.0_002 ) < 1E-2 assert abs(result_mean.item() - 2.2_6_7_6E-0_6 ) < 1E-3 def _a ( self ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE__ : List[str] = self.get_scheduler_config() SCREAMING_SNAKE_CASE__ : Dict = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps , device=__lowerCamelCase ) SCREAMING_SNAKE_CASE__ : List[Any] = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Any = self.dummy_model() SCREAMING_SNAKE_CASE__ : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() SCREAMING_SNAKE_CASE__ : Tuple = sample.to(__lowerCamelCase ) for t in scheduler.timesteps: SCREAMING_SNAKE_CASE__ : List[str] = scheduler.scale_model_input(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE__ : List[str] = model(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE__ : Any = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , generator=__lowerCamelCase ) SCREAMING_SNAKE_CASE__ : int = output.prev_sample SCREAMING_SNAKE_CASE__ : str = torch.sum(torch.abs(__lowerCamelCase ) ) SCREAMING_SNAKE_CASE__ : List[str] = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 10.0_807 ) < 1E-2 assert abs(result_mean.item() - 0.0_131 ) < 1E-3 def _a ( self ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self.scheduler_classes[0] SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_scheduler_config() SCREAMING_SNAKE_CASE__ : Tuple = scheduler_class(**__lowerCamelCase , use_karras_sigmas=__lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps , device=__lowerCamelCase ) SCREAMING_SNAKE_CASE__ : List[Any] = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : List[Any] = self.dummy_model() SCREAMING_SNAKE_CASE__ : int = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() SCREAMING_SNAKE_CASE__ : Union[str, Any] = sample.to(__lowerCamelCase ) for t in scheduler.timesteps: SCREAMING_SNAKE_CASE__ : Union[str, Any] = scheduler.scale_model_input(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE__ : Any = model(__lowerCamelCase , __lowerCamelCase ) SCREAMING_SNAKE_CASE__ : Tuple = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , generator=__lowerCamelCase ) SCREAMING_SNAKE_CASE__ : str = output.prev_sample SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.sum(torch.abs(__lowerCamelCase ) ) SCREAMING_SNAKE_CASE__ : str = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 124.52_299_499_511_719 ) < 1E-2 assert abs(result_mean.item() - 0.16_213_932_633_399_963 ) < 1E-3
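# The scheduler tests above exercise the standard diffusers sampling loop; stripped of the
# assertions it looks like this. The zero tensor stands in for a real UNet's noise prediction, so
# the numbers it produces are not meaningful, only the control flow is.
import torch
from diffusers import EulerDiscreteScheduler

scheduler = EulerDiscreteScheduler(
    num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
)
scheduler.set_timesteps(10)

generator = torch.manual_seed(0)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma

for t in scheduler.timesteps:
    scaled = scheduler.scale_model_input(sample, t)
    model_output = torch.zeros_like(scaled)   # placeholder for model(scaled, t)
    sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample

print(sample.abs().sum().item(), sample.abs().mean().item())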
132
import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: lowerCAmelCase__ = None lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} lowerCAmelCase__ = { 'vocab_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model' ), }, 'tokenizer_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json' ), }, } lowerCAmelCase__ = { 'facebook/nllb-large-en-ro': 10_24, 'facebook/nllb-200-distilled-600M': 10_24, } # fmt: off lowerCAmelCase__ = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn'] class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE = 
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"] __SCREAMING_SNAKE_CASE = NllbTokenizer __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="<s>" , __lowerCamelCase="</s>" , __lowerCamelCase="</s>" , __lowerCamelCase="<s>" , __lowerCamelCase="<unk>" , __lowerCamelCase="<pad>" , __lowerCamelCase="<mask>" , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=False , **__lowerCamelCase , ) -> Tuple: # Mask token behave like a normal word, i.e. include the space before it _A : Any = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase) else mask_token _A : Optional[int] = legacy_behaviour super().__init__( vocab_file=__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , legacy_behaviour=__lowerCamelCase , **__lowerCamelCase , ) _A : int = vocab_file _A : Optional[Any] = False if not self.vocab_file else True _A : Tuple = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. _additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens]) self.add_special_tokens({"additional_special_tokens": _additional_special_tokens}) _A : Union[str, Any] = { lang_code: self.convert_tokens_to_ids(__lowerCamelCase) for lang_code in FAIRSEQ_LANGUAGE_CODES } _A : Optional[int] = src_lang if src_lang is not None else "eng_Latn" _A : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang) _A : List[str] = tgt_lang self.set_src_lang_special_tokens(self._src_lang) @property def _lowerCamelCase ( self) -> str: return self._src_lang @src_lang.setter def _lowerCamelCase ( self , __lowerCamelCase) -> None: _A : Tuple = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]: _A : Tuple = [self.sep_token_id] _A : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) -> Optional[int]: if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model") _A : List[Any] = src_lang _A : Optional[int] = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase) _A : Tuple = self.convert_tokens_to_ids(__lowerCamelCase) _A : Tuple = tgt_lang_id return inputs def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = "eng_Latn" , 
__lowerCamelCase = None , __lowerCamelCase = "fra_Latn" , **__lowerCamelCase , ) -> BatchEncoding: _A : Tuple = src_lang _A : int = tgt_lang return super().prepare_seqaseq_batch(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self) -> str: return self.set_src_lang_special_tokens(self.src_lang) def _lowerCamelCase ( self) -> List[str]: return self.set_tgt_lang_special_tokens(self.tgt_lang) def _lowerCamelCase ( self , __lowerCamelCase) -> None: _A : Dict = self.convert_tokens_to_ids(__lowerCamelCase) if self.legacy_behaviour: _A : List[str] = [] _A : Dict = [self.eos_token_id, self.cur_lang_code] else: _A : Tuple = [self.cur_lang_code] _A : Optional[Any] = [self.eos_token_id] _A : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens) _A : int = self.convert_ids_to_tokens(self.suffix_tokens) _A : List[Any] = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , ) def _lowerCamelCase ( self , __lowerCamelCase) -> None: _A : Optional[Any] = self.convert_tokens_to_ids(__lowerCamelCase) if self.legacy_behaviour: _A : Tuple = [] _A : Any = [self.eos_token_id, self.cur_lang_code] else: _A : Union[str, Any] = [self.cur_lang_code] _A : str = [self.eos_token_id] _A : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens) _A : Dict = self.convert_ids_to_tokens(self.suffix_tokens) _A : Union[str, Any] = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , ) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer.") if not os.path.isdir(__lowerCamelCase): logger.error(F"Vocabulary path ({save_directory}) should be a directory.") return _A : Dict = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(__lowerCamelCase): copyfile(self.vocab_file , __lowerCamelCase) return (out_vocab_file,)
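# Translation with the fast NLLB tokenizer above: set src_lang at load time, then force the
# decoder to start with the target language code via lang_code_to_id (the mapping built in
# __init__). Checkpoint and language codes are the public distilled-600M defaults.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")

inputs = tokenizer("Life is like a box of chocolates.", return_tensors="pt")
generated = model.generate(
    **inputs, forced_bos_token_id=tokenizer.lang_code_to_id["fra_Latn"], max_length=40
)
print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])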
11
0
from .data_collator import (
    DataCollatorForLanguageModeling,
    DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
    DataCollatorForSOP,
    DataCollatorForTokenClassification,
    DataCollatorForWholeWordMask,
    DataCollatorWithPadding,
    DefaultDataCollator,
    default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
    DataProcessor,
    InputExample,
    InputFeatures,
    SingleSentenceClassificationProcessor,
    SquadExample,
    SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
    glue_convert_examples_to_features,
    glue_output_modes,
    glue_processors,
    glue_tasks_num_labels,
    squad_convert_examples_to_features,
    xnli_output_modes,
    xnli_processors,
    xnli_tasks_num_labels,
)
156
# flake8: noqa # Lint as: python3 from typing import Dict, List, Optional, Type from .. import config from ..utils import logging from .formatting import ( ArrowFormatter, CustomFormatter, Formatter, PandasFormatter, PythonFormatter, TensorFormatter, format_table, query_table, ) from .np_formatter import NumpyFormatter lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = {} lowerCAmelCase__ = {} lowerCAmelCase__ = {} def _UpperCAmelCase (UpperCamelCase__ : type , UpperCamelCase__ : Optional[str] , UpperCamelCase__ : Optional[List[str]] = None , ): _A : Union[str, Any] = aliases if aliases is not None else [] if format_type in _FORMAT_TYPES: logger.warning( f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})" ) _A : Dict = formatter_cls for alias in set(aliases + [format_type] ): if alias in _FORMAT_TYPES_ALIASES: logger.warning( f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})" ) _A : Dict = format_type def _UpperCAmelCase (UpperCamelCase__ : Exception , UpperCamelCase__ : Optional[str] , UpperCamelCase__ : Optional[List[str]] = None ): _A : Union[str, Any] = aliases if aliases is not None else [] for alias in set(aliases + [format_type] ): _A : Union[str, Any] = unavailable_error # Here we define all the available formatting functions that can be used by `Dataset.set_format` _register_formatter(PythonFormatter, None, aliases=['python']) _register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow']) _register_formatter(NumpyFormatter, 'numpy', aliases=['np']) _register_formatter(PandasFormatter, 'pandas', aliases=['pd']) _register_formatter(CustomFormatter, 'custom') if config.TORCH_AVAILABLE: from .torch_formatter import TorchFormatter _register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch']) else: lowerCAmelCase__ = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.') _register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch']) if config.TF_AVAILABLE: from .tf_formatter import TFFormatter _register_formatter(TFFormatter, 'tensorflow', aliases=['tf']) else: lowerCAmelCase__ = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.') _register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf']) if config.JAX_AVAILABLE: from .jax_formatter import JaxFormatter _register_formatter(JaxFormatter, 'jax', aliases=[]) else: lowerCAmelCase__ = ValueError('JAX needs to be installed to be able to return JAX arrays.') _register_unavailable_formatter(_jax_error, 'jax', aliases=[]) def _UpperCAmelCase (UpperCamelCase__ : Optional[str] ): if format_type in _FORMAT_TYPES_ALIASES: return _FORMAT_TYPES_ALIASES[format_type] else: return format_type def _UpperCAmelCase (UpperCamelCase__ : Optional[str] , **UpperCamelCase__ : List[Any] ): _A : List[str] = get_format_type_from_alias(UpperCamelCase__ ) if format_type in _FORMAT_TYPES: return _FORMAT_TYPES[format_type](**UpperCamelCase__ ) if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE: raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type] else: raise ValueError( f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}'" )
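# The registry above is what datasets consults when a format is requested by name or alias; from
# user code the formatters are reached through set_format / with_format:
from datasets import Dataset

ds = Dataset.from_dict({"x": [[1, 2], [3, 4]], "y": [0, 1]})

ds.set_format("numpy", columns=["x"])   # the alias "np" resolves to the same formatter
print(type(ds[0]["x"]))                 # <class 'numpy.ndarray'>

ds_pt = ds.with_format("torch")         # raises the registered error if torch is missing
print(type(ds_pt[0]["x"]))              # <class 'torch.Tensor'>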
11
0
"""simple docstring""" import json import os import sys import tempfile import unittest from pathlib import Path from shutil import copyfile from huggingface_hub import HfFolder, Repository, create_repo, delete_repo from requests.exceptions import HTTPError import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, PROCESSOR_MAPPING, TOKENIZER_MAPPING, AutoConfig, AutoFeatureExtractor, AutoProcessor, AutoTokenizer, BertTokenizer, ProcessorMixin, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaProcessor, ) from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 from test_module.custom_processing import CustomProcessor # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 __A = get_tests_dir("fixtures/dummy_feature_extractor_config.json") __A = get_tests_dir("fixtures/vocab.json") __A = get_tests_dir("fixtures") class UpperCAmelCase (unittest.TestCase ): """simple docstring""" _UpperCAmelCase :Tuple = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"] def _snake_case ( self ): lowercase__: str = 0 def _snake_case ( self ): lowercase__: Dict = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) def _snake_case ( self ): with tempfile.TemporaryDirectory() as tmpdirname: lowercase__: Union[str, Any] = WavaVecaConfig() lowercase__: List[Any] = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' ) # save in new folder model_config.save_pretrained(__lowerCamelCase ) processor.save_pretrained(__lowerCamelCase ) lowercase__: Dict = AutoProcessor.from_pretrained(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) def _snake_case ( self ): with tempfile.TemporaryDirectory() as tmpdirname: # copy relevant files copyfile(__lowerCamelCase , os.path.join(__lowerCamelCase , __lowerCamelCase ) ) copyfile(__lowerCamelCase , os.path.join(__lowerCamelCase , '''vocab.json''' ) ) lowercase__: str = AutoProcessor.from_pretrained(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) def _snake_case ( self ): with tempfile.TemporaryDirectory() as tmpdirname: lowercase__: Dict = WavaVecaFeatureExtractor() lowercase__: List[Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' ) lowercase__: Union[str, Any] = WavaVecaProcessor(__lowerCamelCase , __lowerCamelCase ) # save in new folder processor.save_pretrained(__lowerCamelCase ) # drop `processor_class` in tokenizer with open(os.path.join(__lowerCamelCase , __lowerCamelCase ) , '''r''' ) as f: lowercase__: Dict = json.load(__lowerCamelCase ) config_dict.pop('''processor_class''' ) with open(os.path.join(__lowerCamelCase , __lowerCamelCase ) , '''w''' ) as f: f.write(json.dumps(__lowerCamelCase ) ) lowercase__: int = AutoProcessor.from_pretrained(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) def _snake_case ( self ): with tempfile.TemporaryDirectory() as tmpdirname: lowercase__: Tuple = WavaVecaFeatureExtractor() lowercase__: List[str] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' ) lowercase__: 
Union[str, Any] = WavaVecaProcessor(__lowerCamelCase , __lowerCamelCase ) # save in new folder processor.save_pretrained(__lowerCamelCase ) # drop `processor_class` in feature extractor with open(os.path.join(__lowerCamelCase , __lowerCamelCase ) , '''r''' ) as f: lowercase__: Optional[Any] = json.load(__lowerCamelCase ) config_dict.pop('''processor_class''' ) with open(os.path.join(__lowerCamelCase , __lowerCamelCase ) , '''w''' ) as f: f.write(json.dumps(__lowerCamelCase ) ) lowercase__: str = AutoProcessor.from_pretrained(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) def _snake_case ( self ): with tempfile.TemporaryDirectory() as tmpdirname: lowercase__: Tuple = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' ) model_config.save_pretrained(__lowerCamelCase ) # copy relevant files copyfile(__lowerCamelCase , os.path.join(__lowerCamelCase , '''vocab.json''' ) ) # create emtpy sample processor with open(os.path.join(__lowerCamelCase , __lowerCamelCase ) , '''w''' ) as f: f.write('''{}''' ) lowercase__: Optional[int] = AutoProcessor.from_pretrained(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) def _snake_case ( self ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(__lowerCamelCase ): lowercase__: Any = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(__lowerCamelCase ): lowercase__: Optional[Any] = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=__lowerCamelCase ) lowercase__: str = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=__lowerCamelCase ) self.assertTrue(processor.special_attribute_present ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) lowercase__: Any = processor.feature_extractor self.assertTrue(feature_extractor.special_attribute_present ) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) lowercase__: Dict = processor.tokenizer self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) # Test we can also load the slow version lowercase__: str = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase ) lowercase__: List[Any] = new_processor.tokenizer self.assertTrue(new_tokenizer.special_attribute_present ) self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' ) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) def _snake_case ( self ): try: AutoConfig.register('''custom''' , __lowerCamelCase ) AutoFeatureExtractor.register(__lowerCamelCase , __lowerCamelCase ) AutoTokenizer.register(__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase ) AutoProcessor.register(__lowerCamelCase , __lowerCamelCase ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__lowerCamelCase ): AutoProcessor.register(__lowerCamelCase , __lowerCamelCase ) # Now that the config is registered, it can be used as any other config with the auto-API lowercase__: Any = CustomFeatureExtractor.from_pretrained(__lowerCamelCase ) with tempfile.TemporaryDirectory() as tmp_dir: lowercase__: Optional[int] = 
os.path.join(__lowerCamelCase , '''vocab.txt''' ) with open(__lowerCamelCase , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) lowercase__: List[str] = CustomTokenizer(__lowerCamelCase ) lowercase__: Tuple = CustomProcessor(__lowerCamelCase , __lowerCamelCase ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained(__lowerCamelCase ) lowercase__: Tuple = AutoProcessor.from_pretrained(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def _snake_case ( self ): class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Dict = False class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Optional[int] = False class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Optional[Any] = "AutoFeatureExtractor" _UpperCAmelCase :List[str] = "AutoTokenizer" _UpperCAmelCase :Optional[Any] = False try: AutoConfig.register('''custom''' , __lowerCamelCase ) AutoFeatureExtractor.register(__lowerCamelCase , __lowerCamelCase ) AutoTokenizer.register(__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase ) AutoProcessor.register(__lowerCamelCase , __lowerCamelCase ) # If remote code is not set, the default is to use local classes. lowercase__: Dict = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote code is disabled, we load the local ones. lowercase__: Any = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=__lowerCamelCase ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub. 
lowercase__: Union[str, Any] = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=__lowerCamelCase ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertTrue(processor.special_attribute_present ) self.assertTrue(processor.feature_extractor.special_attribute_present ) self.assertTrue(processor.tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def _snake_case ( self ): lowercase__: Tuple = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' ) def _snake_case ( self ): lowercase__: Optional[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' ) self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' ) @is_staging_test class UpperCAmelCase (unittest.TestCase ): """simple docstring""" _UpperCAmelCase :List[str] = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"] @classmethod def _snake_case ( cls ): lowercase__: Any = TOKEN HfFolder.save_token(__lowerCamelCase ) @classmethod def _snake_case ( cls ): try: delete_repo(token=cls._token , repo_id='''test-processor''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' ) except HTTPError: pass def _snake_case ( self ): lowercase__: Optional[Any] = WavaVecaProcessor.from_pretrained(__lowerCamelCase ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(__lowerCamelCase , '''test-processor''' ) , push_to_hub=__lowerCamelCase , use_auth_token=self._token ) lowercase__: str = WavaVecaProcessor.from_pretrained(F"""{USER}/test-processor""" ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(__lowerCamelCase , getattr(new_processor.feature_extractor , __lowerCamelCase ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def _snake_case ( self ): lowercase__: Optional[Any] = WavaVecaProcessor.from_pretrained(__lowerCamelCase ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(__lowerCamelCase , '''test-processor-org''' ) , push_to_hub=__lowerCamelCase , use_auth_token=self._token , organization='''valid_org''' , ) lowercase__: List[str] = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(__lowerCamelCase , getattr(new_processor.feature_extractor , __lowerCamelCase ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def _snake_case ( self ): CustomFeatureExtractor.register_for_auto_class() CustomTokenizer.register_for_auto_class() CustomProcessor.register_for_auto_class() lowercase__: Any = CustomFeatureExtractor.from_pretrained(__lowerCamelCase ) with tempfile.TemporaryDirectory() as tmp_dir: lowercase__: str = os.path.join(__lowerCamelCase , '''vocab.txt''' ) with open(__lowerCamelCase , '''w''' 
, encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) lowercase__: List[Any] = CustomTokenizer(__lowerCamelCase ) lowercase__: int = CustomProcessor(__lowerCamelCase , __lowerCamelCase ) with tempfile.TemporaryDirectory() as tmp_dir: create_repo(F"""{USER}/test-dynamic-processor""" , token=self._token ) lowercase__: Optional[Any] = Repository(__lowerCamelCase , clone_from=F"""{USER}/test-dynamic-processor""" , token=self._token ) processor.save_pretrained(__lowerCamelCase ) # This has added the proper auto_map field to the feature extractor config self.assertDictEqual( processor.feature_extractor.auto_map , { '''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''', '''AutoProcessor''': '''custom_processing.CustomProcessor''', } , ) # This has added the proper auto_map field to the tokenizer config with open(os.path.join(__lowerCamelCase , '''tokenizer_config.json''' ) ) as f: lowercase__: Any = json.load(__lowerCamelCase ) self.assertDictEqual( tokenizer_config['''auto_map'''] , { '''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None], '''AutoProcessor''': '''custom_processing.CustomProcessor''', } , ) # The code has been copied from fixtures self.assertTrue(os.path.isfile(os.path.join(__lowerCamelCase , '''custom_feature_extraction.py''' ) ) ) self.assertTrue(os.path.isfile(os.path.join(__lowerCamelCase , '''custom_tokenization.py''' ) ) ) self.assertTrue(os.path.isfile(os.path.join(__lowerCamelCase , '''custom_processing.py''' ) ) ) repo.push_to_hub() lowercase__: List[Any] = AutoProcessor.from_pretrained(F"""{USER}/test-dynamic-processor""" , trust_remote_code=__lowerCamelCase ) # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
177
def method_a(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
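A quick sanity check for the sample above: the integral of x squared from 0 to 1 is exactly 1/3, so the composite trapezoidal rule should approach that value as the step count grows. The sketch below is self-contained and uses a helper name (trapezoid) of my own, not one taken from the sample.

def trapezoid(func, a, b, steps):
    # Composite trapezoidal rule: endpoints weighted 1/2, interior points weighted 1.
    h = (b - a) / steps
    total = (func(a) + func(b)) / 2.0
    for i in range(1, int(steps)):
        total += func(a + i * h)
    return h * total


if __name__ == "__main__":
    for steps in (10, 100, 1000):
        approx = trapezoid(lambda x: x * x, 0.0, 1.0, steps)
        # The error shrinks roughly as 1 / steps**2 for a smooth integrand.
        print(steps, approx, abs(approx - 1 / 3))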
11
0
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Scrape worldometers and return current worldwide COVID-19 statistics as a dict."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
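Because the scraper above depends on the live worldometers page, an offline sketch of the same findAll/zip pattern can be useful for testing; the HTML snippet here is made up for illustration and is not the site's real markup.

from bs4 import BeautifulSoup

html = """
<h1>Coronavirus Cases:</h1><div class="maincounter-number">1,000</div>
<h1>Deaths:</h1><div class="maincounter-number">50</div>
"""
soup = BeautifulSoup(html, "html.parser")
keys = soup.find_all("h1")
values = soup.find_all("div", {"class": "maincounter-number"})
# Pair headings with their counters, exactly as the scraper above does.
print({k.text.strip(): v.text.strip() for k, v in zip(keys, values)})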
15
import copy import tempfile import unittest from huggingface_hub import HfFolder, delete_repo from parameterized import parameterized from requests.exceptions import HTTPError from transformers import AutoConfig, GenerationConfig from transformers.testing_utils import TOKEN, USER, is_staging_test class lowerCAmelCase__ ( unittest.TestCase): '''simple docstring''' @parameterized.expand([(None,), ("foo.json",)]) def _lowerCamelCase ( self , __lowerCamelCase) -> List[str]: _A : str = GenerationConfig( do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(__lowerCamelCase , config_name=__lowerCamelCase) _A : Tuple = GenerationConfig.from_pretrained(__lowerCamelCase , config_name=__lowerCamelCase) # Checks parameters that were specified self.assertEqual(loaded_config.do_sample , __lowerCamelCase) self.assertEqual(loaded_config.temperature , 0.7) self.assertEqual(loaded_config.length_penalty , 1.0) self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]]) # Checks parameters that were not specified (defaults) self.assertEqual(loaded_config.top_k , 5_0) self.assertEqual(loaded_config.max_length , 2_0) self.assertEqual(loaded_config.max_time , __lowerCamelCase) def _lowerCamelCase ( self) -> Optional[int]: _A : Optional[int] = AutoConfig.from_pretrained("gpt2") _A : int = GenerationConfig.from_model_config(__lowerCamelCase) _A : List[Any] = GenerationConfig() # The generation config has loaded a few non-default parameters from the model config self.assertNotEqual(__lowerCamelCase , __lowerCamelCase) # One of those parameters is eos_token_id -- check if it matches self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id) self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id) def _lowerCamelCase ( self) -> Optional[Any]: _A : Optional[Any] = GenerationConfig() _A : List[Any] = { "max_new_tokens": 1_0_2_4, "foo": "bar", } _A : List[str] = copy.deepcopy(__lowerCamelCase) _A : int = generation_config.update(**__lowerCamelCase) # update_kwargs was not modified (no side effects) self.assertEqual(__lowerCamelCase , __lowerCamelCase) # update_kwargs was used to update the config on valid attributes self.assertEqual(generation_config.max_new_tokens , 1_0_2_4) # `.update()` returns a dictionary of unused kwargs self.assertEqual(__lowerCamelCase , {"foo": "bar"}) def _lowerCamelCase ( self) -> Any: _A : int = GenerationConfig() _A : int = "bar" with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(__lowerCamelCase) _A : Any = GenerationConfig.from_pretrained(__lowerCamelCase) # update_kwargs was used to update the config on valid attributes self.assertEqual(new_config.foo , "bar") _A : Optional[Any] = GenerationConfig.from_model_config(__lowerCamelCase) assert not hasattr(__lowerCamelCase , "foo") # no new kwargs should be initialized if from config def _lowerCamelCase ( self) -> List[str]: _A : Union[str, Any] = GenerationConfig() self.assertEqual(default_config.temperature , 1.0) self.assertEqual(default_config.do_sample , __lowerCamelCase) self.assertEqual(default_config.num_beams , 1) _A : Optional[int] = GenerationConfig( do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) self.assertEqual(config.temperature , 0.7) self.assertEqual(config.do_sample , __lowerCamelCase) 
self.assertEqual(config.num_beams , 1) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(__lowerCamelCase) _A : Optional[int] = GenerationConfig.from_pretrained(__lowerCamelCase , temperature=1.0) self.assertEqual(loaded_config.temperature , 1.0) self.assertEqual(loaded_config.do_sample , __lowerCamelCase) self.assertEqual(loaded_config.num_beams , 1) # default value @is_staging_test class lowerCAmelCase__ ( unittest.TestCase): '''simple docstring''' @classmethod def _lowerCamelCase ( cls) -> Optional[int]: _A : Dict = TOKEN HfFolder.save_token(__lowerCamelCase) @classmethod def _lowerCamelCase ( cls) -> List[Any]: try: delete_repo(token=cls._token , repo_id="test-generation-config") except HTTPError: pass try: delete_repo(token=cls._token , repo_id="valid_org/test-generation-config-org") except HTTPError: pass def _lowerCamelCase ( self) -> Any: _A : Optional[int] = GenerationConfig( do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub("test-generation-config" , use_auth_token=self._token) _A : Union[str, Any] = GenerationConfig.from_pretrained(F"{USER}/test-generation-config") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase)) # Reset repo delete_repo(token=self._token , repo_id="test-generation-config") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( __lowerCamelCase , repo_id="test-generation-config" , push_to_hub=__lowerCamelCase , use_auth_token=self._token) _A : Optional[Any] = GenerationConfig.from_pretrained(F"{USER}/test-generation-config") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase)) def _lowerCamelCase ( self) -> Union[str, Any]: _A : Union[str, Any] = GenerationConfig( do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub("valid_org/test-generation-config-org" , use_auth_token=self._token) _A : int = GenerationConfig.from_pretrained("valid_org/test-generation-config-org") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase)) # Reset repo delete_repo(token=self._token , repo_id="valid_org/test-generation-config-org") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( __lowerCamelCase , repo_id="valid_org/test-generation-config-org" , push_to_hub=__lowerCamelCase , use_auth_token=self._token) _A : Optional[int] = GenerationConfig.from_pretrained("valid_org/test-generation-config-org") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
11
0
def bin_to_octal(bin_string: str) -> str:
    """Convert a binary string to its octal representation by reading bits in groups of three."""
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    # Left-pad so the length is a multiple of 3, then split into 3-bit groups.
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string


if __name__ == "__main__":
    from doctest import testmod

    testmod()
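A quick cross-check for the grouping-by-three conversion above: the standard library produces the same octal digits via an int/format round trip, so the hand-rolled function should agree with the values printed below, up to any leading zero it keeps from padding the input.

# Reference values from the standard library.
for sample in ("1", "101", "110011", "1111000"):
    print(sample, "->", format(int(sample, 2), "o"))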
302
import pickle import numpy as np from matplotlib import pyplot as plt class lowerCAmelCase__ : '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=0.2 , __lowerCamelCase=0.2) -> str: _A : Optional[int] = bp_numa _A : Dict = bp_numa _A : Tuple = bp_numa _A : List[str] = conva_get[:2] _A : Tuple = conva_get[2] _A : Optional[int] = size_pa _A : Optional[Any] = rate_w _A : Optional[Any] = rate_t _A : Union[str, Any] = [ np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0]) + 0.5) for i in range(self.conva[1]) ] _A : int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5) _A : Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5) _A : Any = -2 * np.random.rand(self.conva[1]) + 1 _A : Optional[int] = -2 * np.random.rand(self.num_bpa) + 1 _A : Optional[Any] = -2 * np.random.rand(self.num_bpa) + 1 def _lowerCamelCase ( self , __lowerCamelCase) -> Dict: # save model dict with pickle _A : Dict = { "num_bp1": self.num_bpa, "num_bp2": self.num_bpa, "num_bp3": self.num_bpa, "conv1": self.conva, "step_conv1": self.step_conva, "size_pooling1": self.size_poolinga, "rate_weight": self.rate_weight, "rate_thre": self.rate_thre, "w_conv1": self.w_conva, "wkj": self.wkj, "vji": self.vji, "thre_conv1": self.thre_conva, "thre_bp2": self.thre_bpa, "thre_bp3": self.thre_bpa, } with open(__lowerCamelCase , "wb") as f: pickle.dump(__lowerCamelCase , __lowerCamelCase) print(F"Model saved: {save_path}") @classmethod def _lowerCamelCase ( cls , __lowerCamelCase) -> Any: # read saved model with open(__lowerCamelCase , "rb") as f: _A : Any = pickle.load(__lowerCamelCase) # noqa: S301 _A : Optional[int] = model_dic.get("conv1") conv_get.append(model_dic.get("step_conv1")) _A : str = model_dic.get("size_pooling1") _A : List[str] = model_dic.get("num_bp1") _A : Union[str, Any] = model_dic.get("num_bp2") _A : List[Any] = model_dic.get("num_bp3") _A : Dict = model_dic.get("rate_weight") _A : List[Any] = model_dic.get("rate_thre") # create model instance _A : str = CNN(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) # modify model parameter _A : List[Any] = model_dic.get("w_conv1") _A : Union[str, Any] = model_dic.get("wkj") _A : str = model_dic.get("vji") _A : List[str] = model_dic.get("thre_conv1") _A : Optional[Any] = model_dic.get("thre_bp2") _A : Dict = model_dic.get("thre_bp3") return conv_ins def _lowerCamelCase ( self , __lowerCamelCase) -> Dict: return 1 / (1 + np.exp(-1 * x)) def _lowerCamelCase ( self , __lowerCamelCase) -> Union[str, Any]: return round(__lowerCamelCase , 3) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Union[str, Any]: # convolution process _A : Tuple = convs[0] _A : Union[str, Any] = convs[1] _A : List[Any] = np.shape(__lowerCamelCase)[0] # get the data slice of original image data, data_focus _A : Tuple = [] for i_focus in range(0 , size_data - size_conv + 1 , __lowerCamelCase): for j_focus in range(0 , size_data - size_conv + 1 , __lowerCamelCase): _A : Optional[int] = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(__lowerCamelCase) # calculate the feature map of every single kernel, and saved as list of matrix _A : Optional[Any] = [] _A : Optional[int] = int((size_data - size_conv) / conv_step + 1) for i_map in range(__lowerCamelCase): _A : Optional[int] = [] 
for i_focus in range(len(__lowerCamelCase)): _A : Any = ( np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map])) - thre_convs[i_map] ) featuremap.append(self.sig(__lowerCamelCase)) _A : Optional[Any] = np.asmatrix(__lowerCamelCase).reshape( __lowerCamelCase , __lowerCamelCase) data_featuremap.append(__lowerCamelCase) # expanding the data slice to One dimenssion _A : Optional[Any] = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(__lowerCamelCase)) _A : Dict = np.asarray(__lowerCamelCase) return focus_list, data_featuremap def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase="average_pool") -> Dict: # pooling process _A : Optional[Any] = len(featuremaps[0]) _A : str = int(size_map / size_pooling) _A : Optional[int] = [] for i_map in range(len(__lowerCamelCase)): _A : int = featuremaps[i_map] _A : Optional[int] = [] for i_focus in range(0 , __lowerCamelCase , __lowerCamelCase): for j_focus in range(0 , __lowerCamelCase , __lowerCamelCase): _A : str = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(__lowerCamelCase)) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(__lowerCamelCase)) _A : Tuple = np.asmatrix(__lowerCamelCase).reshape(__lowerCamelCase , __lowerCamelCase) featuremap_pooled.append(__lowerCamelCase) return featuremap_pooled def _lowerCamelCase ( self , __lowerCamelCase) -> Tuple: # expanding three dimension data to one dimension list _A : Tuple = [] for i in range(len(__lowerCamelCase)): _A : Union[str, Any] = np.shape(data[i]) _A : List[Any] = data[i].reshape(1 , shapes[0] * shapes[1]) _A : Optional[Any] = data_listed.getA().tolist()[0] data_expanded.extend(__lowerCamelCase) _A : Optional[Any] = np.asarray(__lowerCamelCase) return data_expanded def _lowerCamelCase ( self , __lowerCamelCase) -> Union[str, Any]: # expanding matrix to one dimension list _A : List[Any] = np.asarray(__lowerCamelCase) _A : Union[str, Any] = np.shape(__lowerCamelCase) _A : Dict = data_mat.reshape(1 , shapes[0] * shapes[1]) return data_expanded def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Optional[int]: _A : Dict = [] _A : Any = 0 for i_map in range(__lowerCamelCase): _A : Union[str, Any] = np.ones((size_map, size_map)) for i in range(0 , __lowerCamelCase , __lowerCamelCase): for j in range(0 , __lowerCamelCase , __lowerCamelCase): _A : List[Any] = pd_pool[ i_pool ] _A : Tuple = i_pool + 1 _A : Optional[Any] = np.multiply( __lowerCamelCase , np.multiply(out_map[i_map] , (1 - out_map[i_map]))) pd_all.append(__lowerCamelCase) return pd_all def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=bool) -> Union[str, Any]: # model traning print("----------------------Start Training-------------------------") print((" - - Shape: Train_Data ", np.shape(__lowerCamelCase))) print((" - - Shape: Teach_Data ", np.shape(__lowerCamelCase))) _A : Tuple = 0 _A : Dict = [] _A : Optional[Any] = 1_0_0_0_0 while rp < n_repeat and mse >= error_accuracy: _A : Union[str, Any] = 0 print(F"-------------Learning Time {rp}--------------") for p in range(len(__lowerCamelCase)): # print('------------Learning Image: %d--------------'%p) _A : str = np.asmatrix(datas_train[p]) _A : Union[str, Any] = np.asarray(datas_teach[p]) _A , _A : Any = self.convolute( __lowerCamelCase , 
self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) _A : Optional[Any] = self.pooling(__lowerCamelCase , self.size_poolinga) _A : Optional[int] = np.shape(__lowerCamelCase) _A : List[str] = self._expand(__lowerCamelCase) _A : Tuple = data_bp_input _A : int = np.dot(__lowerCamelCase , self.vji.T) - self.thre_bpa _A : List[Any] = self.sig(__lowerCamelCase) _A : Union[str, Any] = np.dot(__lowerCamelCase , self.wkj.T) - self.thre_bpa _A : List[str] = self.sig(__lowerCamelCase) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- _A : int = np.multiply( (data_teach - bp_outa) , np.multiply(__lowerCamelCase , (1 - bp_outa))) _A : Optional[Any] = np.multiply( np.dot(__lowerCamelCase , self.wkj) , np.multiply(__lowerCamelCase , (1 - bp_outa))) _A : Union[str, Any] = np.dot(__lowerCamelCase , self.vji) _A : Any = pd_i_all / (self.size_poolinga * self.size_poolinga) _A : Dict = pd_conva_pooled.T.getA().tolist() _A : Optional[Any] = self._calculate_gradient_from_pool( __lowerCamelCase , __lowerCamelCase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , ) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1]): _A : int = self._expand_mat(pd_conva_all[k_conv]) _A : Optional[int] = self.rate_weight * np.dot(__lowerCamelCase , __lowerCamelCase) _A : List[Any] = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0])) _A : Any = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv]) * self.rate_thre ) # all connected layer _A : Tuple = self.wkj + pd_k_all.T * bp_outa * self.rate_weight _A : int = self.vji + pd_j_all.T * bp_outa * self.rate_weight _A : Tuple = self.thre_bpa - pd_k_all * self.rate_thre _A : List[str] = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image _A : Optional[int] = np.sum(abs(data_teach - bp_outa)) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) _A : Any = rp + 1 _A : Dict = error_count / patterns all_mse.append(__lowerCamelCase) def draw_error(): _A : Optional[int] = [error_accuracy for i in range(int(n_repeat * 1.2))] plt.plot(__lowerCamelCase , "+-") plt.plot(__lowerCamelCase , "r--") plt.xlabel("Learning Times") plt.ylabel("All_mse") plt.grid(__lowerCamelCase , alpha=0.5) plt.show() print("------------------Training Complished---------------------") print((" - - Training epoch: ", rp, F" - - Mse: {mse:.6f}")) if draw_e: draw_error() return mse def _lowerCamelCase ( self , __lowerCamelCase) -> int: # model predict _A : Union[str, Any] = [] print("-------------------Start Testing-------------------------") print((" - - Shape: Test_Data ", np.shape(__lowerCamelCase))) for p in range(len(__lowerCamelCase)): _A : int = np.asmatrix(datas_test[p]) _A , _A : List[Any] = self.convolute( __lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) _A : str = self.pooling(__lowerCamelCase , self.size_poolinga) _A : Optional[int] = self._expand(__lowerCamelCase) _A : List[Any] = data_bp_input _A : Optional[int] = bp_outa * self.vji.T - self.thre_bpa _A : int = self.sig(__lowerCamelCase) _A : int = bp_outa * self.wkj.T - self.thre_bpa _A : Optional[int] = self.sig(__lowerCamelCase) produce_out.extend(bp_outa.getA().tolist()) _A : int = [list(map(self.do_round , __lowerCamelCase)) for each in produce_out] return np.asarray(__lowerCamelCase) def _lowerCamelCase ( self , __lowerCamelCase) -> Dict: # return the data of image 
after convoluting process so we can check it out _A : Optional[int] = np.asmatrix(__lowerCamelCase) _A , _A : Tuple = self.convolute( __lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) _A : Union[str, Any] = self.pooling(__lowerCamelCase , self.size_poolinga) return data_conveda, data_pooleda if __name__ == "__main__": pass
11
0
"""simple docstring""" import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def _snake_case ( lowerCamelCase__ : Union[str, Any] ) -> List[Any]: lowerCamelCase_ : Any =image.size lowerCamelCase_ : str =(x - x % 32 for x in (w, h)) # resize to integer multiple of 32 lowerCamelCase_ : List[str] =image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) lowerCamelCase_ : Any =np.array(UpperCamelCase__ ).astype(np.floataa ) / 255.0 lowerCamelCase_ : Optional[Any] =image[None].transpose(0 , 3 , 1 , 2 ) lowerCamelCase_ : Union[str, Any] =torch.from_numpy(UpperCamelCase__ ) return 2.0 * image - 1.0 class lowercase__ ( snake_case__ ): def __init__( self : int , snake_case__ : Optional[Any] , snake_case__ : Any , snake_case__ : int , ): super().__init__() self.register_modules(vqvae=__lowerCamelCase , unet=__lowerCamelCase , scheduler=__lowerCamelCase ) @torch.no_grad() def __call__( self : Tuple , snake_case__ : str = None , snake_case__ : int = 1 , snake_case__ : str = 100 , snake_case__ : Any = 0.0 , snake_case__ : Optional[Any] = None , snake_case__ : Optional[Any] = "pil" , snake_case__ : Dict = True , ): if isinstance(__lowerCamelCase , PIL.Image.Image ): lowerCamelCase_ : Tuple =1 elif isinstance(__lowerCamelCase , torch.Tensor ): lowerCamelCase_ : Union[str, Any] =image.shape[0] else: raise ValueError(F"""`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(__lowerCamelCase )}""" ) if isinstance(__lowerCamelCase , PIL.Image.Image ): lowerCamelCase_ : Union[str, Any] =preprocess(__lowerCamelCase ) lowerCamelCase_ : Union[str, Any] =image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image lowerCamelCase_ : Optional[Any] =(batch_size, self.unet.config.in_channels // 2, height, width) lowerCamelCase_ : str =next(self.unet.parameters() ).dtype lowerCamelCase_ : Union[str, Any] =randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=self.device , dtype=__lowerCamelCase ) lowerCamelCase_ : List[Any] =image.to(device=self.device , dtype=__lowerCamelCase ) # set timesteps and move to the correct device self.scheduler.set_timesteps(__lowerCamelCase , device=self.device ) lowerCamelCase_ : Any =self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler lowerCamelCase_ : List[str] =latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] lowerCamelCase_ : str ="eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) lowerCamelCase_ : Optional[int] ={} if accepts_eta: lowerCamelCase_ : List[Any] =eta for t in self.progress_bar(__lowerCamelCase ): # concat latents and low resolution image in the channel dimension. 
lowerCamelCase_ : List[Any] =torch.cat([latents, image] , dim=1 ) lowerCamelCase_ : str =self.scheduler.scale_model_input(__lowerCamelCase , __lowerCamelCase ) # predict the noise residual lowerCamelCase_ : Any =self.unet(__lowerCamelCase , __lowerCamelCase ).sample # compute the previous noisy sample x_t -> x_t-1 lowerCamelCase_ : Optional[int] =self.scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample # decode the image latents with the VQVAE lowerCamelCase_ : Union[str, Any] =self.vqvae.decode(__lowerCamelCase ).sample lowerCamelCase_ : Dict =torch.clamp(__lowerCamelCase , -1.0 , 1.0 ) lowerCamelCase_ : Tuple =image / 2 + 0.5 lowerCamelCase_ : int =image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowerCamelCase_ : Optional[int] =self.numpy_to_pil(__lowerCamelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=__lowerCamelCase )
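The preprocess helper at the top of the pipeline above snaps the image size down to a multiple of 32, rescales to [0, 1], moves channels first, and maps to [-1, 1]. Below is a standalone NumPy/PIL sketch of those steps on a dummy image; the real pipeline returns a torch tensor rather than an array.

import numpy as np
import PIL.Image

image = PIL.Image.new("RGB", (130, 97), color=(128, 64, 32))  # dummy input image
w, h = (x - x % 32 for x in image.size)                       # -> 128, 96
image = image.resize((w, h), resample=PIL.Image.LANCZOS)
array = np.array(image).astype(np.float32) / 255.0            # HWC in [0, 1]
array = array[None].transpose(0, 3, 1, 2)                     # NCHW with batch dim
normalized = 2.0 * array - 1.0                                # map to [-1, 1]
print(normalized.shape)                                       # (1, 3, 96, 128)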
144
import re from flax.core.frozen_dict import freeze from flax.traverse_util import flatten_dict, unflatten_dict from jax.experimental import PartitionSpec as P # Sentinels lowerCAmelCase__ = object() # For specifying empty leaf dict `{}` lowerCAmelCase__ = object() def _UpperCAmelCase (UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] ): _A : str = tuple((re.compile(x + "$" ) for x in qs) ) for i in range(len(UpperCamelCase__ ) - len(UpperCamelCase__ ) + 1 ): _A : Tuple = [x.match(UpperCamelCase__ ) for x, y in zip(UpperCamelCase__ , ks[i:] )] if matches and all(UpperCamelCase__ ): return True return False def _UpperCAmelCase (UpperCamelCase__ : str ): def replace(UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int] ): for rule, replacement in rules: if _match(UpperCamelCase__ , UpperCamelCase__ ): return replacement return val return replace def _UpperCAmelCase (): return [ # embeddings (("transformer", "wpe", "embedding"), P("mp" , UpperCamelCase__ )), (("transformer", "wte", "embedding"), P("mp" , UpperCamelCase__ )), # atention (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(UpperCamelCase__ , "mp" )), (("attention", "out_proj", "kernel"), P("mp" , UpperCamelCase__ )), (("attention", "out_proj", "bias"), None), # mlp (("mlp", "c_fc", "kernel"), P(UpperCamelCase__ , "mp" )), (("mlp", "c_fc", "bias"), P("mp" )), (("mlp", "c_proj", "kernel"), P("mp" , UpperCamelCase__ )), (("mlp", "c_proj", "bias"), None), # layer norms ((r"ln_\d+", "bias"), None), ((r"\d+", r"ln_\d+", "scale"), None), (("ln_f", "bias"), None), (("ln_f", "scale"), None), ] def _UpperCAmelCase (UpperCamelCase__ : List[str] ): _A : int = _get_partition_rules() _A : Optional[int] = _replacement_rules(UpperCamelCase__ ) _A : Optional[int] = {k: _unmatched for k in flatten_dict(UpperCamelCase__ )} _A : List[str] = {k: replace(UpperCamelCase__ , UpperCamelCase__ ) for k, v in initd.items()} assert _unmatched not in result.values(), "Incomplete partition spec." return freeze(unflatten_dict(UpperCamelCase__ ) )
11
0
from ...configuration_utils import PretrainedConfig from ...utils import logging a__: int = logging.get_logger(__name__) a__: Optional[Any] = { 'RWKV/rwkv-4-169m-pile': 'https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json', 'RWKV/rwkv-4-430m-pile': 'https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json', 'RWKV/rwkv-4-1b5-pile': 'https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json', 'RWKV/rwkv-4-3b-pile': 'https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json', 'RWKV/rwkv-4-7b-pile': 'https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json', 'RWKV/rwkv-4-14b-pile': 'https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json', 'RWKV/rwkv-raven-1b5': 'https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json', 'RWKV/rwkv-raven-3b': 'https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json', 'RWKV/rwkv-raven-7b': 'https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json', 'RWKV/rwkv-raven-14b': 'https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json', } class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ): __SCREAMING_SNAKE_CASE = '''rwkv''' __SCREAMING_SNAKE_CASE = {'''max_position_embeddings''': '''context_length'''} def __init__( self,__lowerCamelCase=5_0277,__lowerCamelCase=1024,__lowerCamelCase=4096,__lowerCamelCase=32,__lowerCamelCase=None,__lowerCamelCase=None,__lowerCamelCase=1E-5,__lowerCamelCase=0,__lowerCamelCase=0,__lowerCamelCase=6,__lowerCamelCase=False,__lowerCamelCase=True,**__lowerCamelCase,): A__ = vocab_size A__ = context_length A__ = hidden_size A__ = num_hidden_layers A__ = attention_hidden_size if attention_hidden_size is not None else hidden_size A__ = intermediate_size if intermediate_size is not None else 4 * hidden_size A__ = layer_norm_epsilon A__ = rescale_every A__ = use_cache A__ = bos_token_id A__ = eos_token_id super().__init__( tie_word_embeddings=__lowerCamelCase,bos_token_id=__lowerCamelCase,eos_token_id=__lowerCamelCase,**__lowerCamelCase )
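The sample above mirrors the released RWKV configuration class. A short sketch of how such a config is normally instantiated, assuming a transformers version that ships RwkvConfig; the derived defaults follow the same attention_hidden_size/intermediate_size fallbacks coded above.

from transformers import RwkvConfig

config = RwkvConfig(context_length=2048, hidden_size=512, num_hidden_layers=8)
print(config.attention_hidden_size)     # falls back to hidden_size -> 512
print(config.intermediate_size)         # falls back to 4 * hidden_size -> 2048
print(config.max_position_embeddings)   # aliased to context_length -> 2048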
193
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """Convert a snake_case identifier to camelCase, or PascalCase if use_pascal is True."""
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)
    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])


if __name__ == "__main__":
    from doctest import testmod

    testmod()
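For comparison, a compact standard-library route to the same conversion; to_camel is my own name for this sketch, and like the sample it only uppercases the first character of each word, leaving the rest untouched.

def to_camel(s: str, use_pascal: bool = False) -> str:
    head, *rest = s.split("_")
    first = head[:1].upper() + head[1:] if use_pascal else head
    return first + "".join(w[:1].upper() + w[1:] for w in rest)


print(to_camel("some_random_string"))                      # someRandomString
print(to_camel("some_random_string", use_pascal=True))     # SomeRandomString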
11
0
def solution(limit: int = 1000) -> int:
    """Return the sum of every number below `limit` that is divisible by 3 or 5."""
    return sum(e for e in range(3, limit) if e % 3 == 0 or e % 5 == 0)


if __name__ == "__main__":
    print(f"{solution() = }")
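The brute-force sum above can be cross-checked with an inclusion-exclusion closed form: add the sums of multiples of 3 and of 5, then subtract the multiples of 15 that were counted twice.

def solution_closed_form(limit: int = 1000) -> int:
    def multiples_sum(k: int) -> int:
        m = (limit - 1) // k          # how many multiples of k lie below limit
        return k * m * (m + 1) // 2   # k * (1 + 2 + ... + m)

    return multiples_sum(3) + multiples_sum(5) - multiples_sum(15)


print(solution_closed_form())  # 233168, matching the loop-based version above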
22
from __future__ import annotations


def fractional_knapsack(
    value: list[int], weight: list[int], capacity: int
) -> tuple[float, list[float]]:
    """Greedy fractional knapsack: take items whole in order of value/weight ratio,
    then a fraction of the first item that no longer fits."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
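A worked instance for the greedy above, using classic textbook numbers chosen here for illustration: values (60, 100, 120), weights (10, 20, 30), capacity 50. Items 1 and 2 fit whole and two thirds of item 3 fills the rest, giving 60 + 100 + 80 = 240.

values, weights, capacity = [60, 100, 120], [10, 20, 30], 50.0
order = sorted(range(len(values)), key=lambda i: values[i] / weights[i], reverse=True)
total = 0.0
for i in order:
    take = min(1.0, capacity / weights[i])   # fraction of item i that still fits
    total += values[i] * take
    capacity -= weights[i] * take
    if capacity <= 0:
        break
print(total)  # 240.0 (up to floating-point rounding)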
11
0
import os


def solution() -> str:
    """Return the first ten digits of the sum of the numbers listed in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
163
import warnings

from ...utils import logging
from .image_processing_beit import BeitImageProcessor


logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
11
0
"""simple docstring""" from __future__ import annotations # This is the precision for this function which can be altered. # It is recommended for users to keep this number greater than or equal to 10. __A : List[str] = 10 def lowercase ( __snake_case : int , __snake_case : int , __snake_case : list[int] , __snake_case : int ): for i in range(UpperCamelCase__ , UpperCamelCase__ ): if array[i] == target: return i return -1 def lowercase ( __snake_case : list[int] , __snake_case : int ): lowercase_ : Any = 0 lowercase_ : Union[str, Any] = len(UpperCamelCase__ ) while left <= right: if right - left < precision: return lin_search(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) lowercase_ : Dict = (left + right) // 3 + 1 lowercase_ : Any = 2 * (left + right) // 3 + 1 if array[one_third] == target: return one_third elif array[two_third] == target: return two_third elif target < array[one_third]: lowercase_ : Optional[Any] = one_third - 1 elif array[two_third] < target: lowercase_ : List[Any] = two_third + 1 else: lowercase_ : int = one_third + 1 lowercase_ : List[Any] = two_third - 1 else: return -1 def lowercase ( __snake_case : int , __snake_case : int , __snake_case : list[int] , __snake_case : int ): if left < right: if right - left < precision: return lin_search(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) lowercase_ : Optional[int] = (left + right) // 3 + 1 lowercase_ : Tuple = 2 * (left + right) // 3 + 1 if array[one_third] == target: return one_third elif array[two_third] == target: return two_third elif target < array[one_third]: return rec_ternary_search(UpperCamelCase__ , one_third - 1 , UpperCamelCase__ , UpperCamelCase__ ) elif array[two_third] < target: return rec_ternary_search(two_third + 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: return rec_ternary_search(one_third + 1 , two_third - 1 , UpperCamelCase__ , UpperCamelCase__ ) else: return -1 if __name__ == "__main__": import doctest doctest.testmod() __A : str = input('''Enter numbers separated by comma:\n''').strip() __A : Optional[Any] = [int(item.strip()) for item in user_input.split(''',''')] assert collection == sorted(collection), F"List must be ordered.\n{collection}." __A : Tuple = int(input('''Enter the number to be found in the list:\n''').strip()) __A : str = ite_ternary_search(collection, target) __A : int = rec_ternary_search(0, len(collection) - 1, collection, target) if resulta != -1: print(F"""Iterative search: {target} found at positions: {resulta}""") print(F"""Recursive search: {target} found at positions: {resulta}""") else: print('''Not found''')
33
import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class lowerCAmelCase__ : '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=3_2 , __lowerCamelCase=2 , __lowerCamelCase=3 , __lowerCamelCase=1_6 , __lowerCamelCase=[1, 2, 1] , __lowerCamelCase=[2, 2, 4] , __lowerCamelCase=2 , __lowerCamelCase=2.0 , __lowerCamelCase=True , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.1 , __lowerCamelCase="gelu" , __lowerCamelCase=False , __lowerCamelCase=True , __lowerCamelCase=0.0_2 , __lowerCamelCase=1e-5 , __lowerCamelCase=True , __lowerCamelCase=None , __lowerCamelCase=True , __lowerCamelCase=1_0 , __lowerCamelCase=8 , __lowerCamelCase=["stage1", "stage2", "stage3"] , __lowerCamelCase=[1, 2, 3] , ) -> Optional[Any]: _A : int = parent _A : Optional[Any] = batch_size _A : str = image_size _A : Tuple = patch_size _A : Tuple = num_channels _A : Optional[int] = embed_dim _A : Dict = depths _A : Any = num_heads _A : Any = window_size _A : int = mlp_ratio _A : Any = qkv_bias _A : Union[str, Any] = hidden_dropout_prob _A : Optional[Any] = attention_probs_dropout_prob _A : Dict = drop_path_rate _A : List[Any] = hidden_act _A : Any = use_absolute_embeddings _A : Optional[int] = patch_norm _A : Tuple = layer_norm_eps _A : List[str] = initializer_range _A : Optional[int] = is_training _A : Optional[Any] = scope _A : Optional[int] = use_labels _A : Dict = type_sequence_label_size _A : str = encoder_stride _A : Optional[int] = out_features _A : Optional[int] = out_indices def _lowerCamelCase ( self) -> Union[str, Any]: _A : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) _A : Optional[Any] = None if self.use_labels: _A : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size) _A : Optional[int] = self.get_config() return config, pixel_values, labels def _lowerCamelCase ( self) -> Union[str, Any]: return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> List[Any]: _A : Dict = MaskFormerSwinModel(config=__lowerCamelCase) model.to(__lowerCamelCase) model.eval() _A : int = model(__lowerCamelCase) _A : Any = 
((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1)) _A : List[str] = int(config.embed_dim * 2 ** (len(config.depths) - 1)) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim)) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Dict: _A : Optional[Any] = MaskFormerSwinBackbone(config=__lowerCamelCase) model.to(__lowerCamelCase) model.eval() _A : Dict = model(__lowerCamelCase) # verify feature maps self.parent.assertEqual(len(result.feature_maps) , len(config.out_features)) self.parent.assertListEqual(list(result.feature_maps[0].shape) , [1_3, 1_6, 1_6, 1_6]) # verify channels self.parent.assertEqual(len(model.channels) , len(config.out_features)) self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4]) # verify ValueError with self.parent.assertRaises(__lowerCamelCase): _A : Union[str, Any] = ["stem"] _A : Union[str, Any] = MaskFormerSwinBackbone(config=__lowerCamelCase) def _lowerCamelCase ( self) -> Dict: _A : Any = self.prepare_config_and_inputs() _A , _A , _A : List[Any] = config_and_inputs _A : Optional[int] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase__ ( a , a , unittest.TestCase): '''simple docstring''' __SCREAMING_SNAKE_CASE = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) __SCREAMING_SNAKE_CASE = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {} __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False def _lowerCamelCase ( self) -> str: _A : Union[str, Any] = MaskFormerSwinModelTester(self) _A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , embed_dim=3_7) @require_torch_multi_gpu @unittest.skip( reason=( "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with" " `nn.DataParallel`" )) def _lowerCamelCase ( self) -> Union[str, Any]: pass def _lowerCamelCase ( self) -> int: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _lowerCamelCase ( self) -> str: return def _lowerCamelCase ( self) -> List[Any]: _A : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase) def _lowerCamelCase ( self) -> Union[str, Any]: _A : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__lowerCamelCase) @unittest.skip("Swin does not use inputs_embeds") def _lowerCamelCase ( self) -> str: pass @unittest.skip("Swin does not support feedforward chunking") def _lowerCamelCase ( self) -> List[Any]: pass def _lowerCamelCase ( self) -> Optional[int]: _A , _A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A : Union[str, Any] = model_class(__lowerCamelCase) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) _A : Dict = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear)) def _lowerCamelCase ( self) -> Any: _A , _A : List[Any] 
= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A : int = model_class(__lowerCamelCase) _A : Optional[int] = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic _A : int = [*signature.parameters.keys()] _A : Optional[int] = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase) @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions") def _lowerCamelCase ( self) -> Tuple: pass @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone") def _lowerCamelCase ( self) -> str: pass def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Optional[int]: _A : Any = model_class(__lowerCamelCase) model.to(__lowerCamelCase) model.eval() with torch.no_grad(): _A : str = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)) _A : Tuple = outputs.hidden_states _A : Any = getattr( self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths) + 1) self.assertEqual(len(__lowerCamelCase) , __lowerCamelCase) # Swin has a different seq_length _A : Optional[int] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable) else (config.patch_size, config.patch_size) ) _A : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , ) def _lowerCamelCase ( self) -> Dict: _A , _A : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _A : Any = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: _A : List[Any] = True self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _A : Optional[int] = True self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) def _lowerCamelCase ( self) -> Tuple: _A , _A : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _A : Optional[int] = 3 _A : Dict = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) _A : Optional[int] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable) else (config.patch_size, config.patch_size) ) _A : int = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) _A : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: _A : List[Any] = True self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , (padded_height, padded_width)) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _A : Union[str, Any] = True self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , (padded_height, padded_width)) @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints") def _lowerCamelCase ( self) -> List[str]: pass @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin") def _lowerCamelCase ( self) -> List[str]: 
pass @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin") def _lowerCamelCase ( self) -> str: pass def _lowerCamelCase ( self) -> Optional[Any]: _A , _A : Any = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(__lowerCamelCase): _A : Optional[int] = 0 return t def check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase={}): with torch.no_grad(): _A : Any = model(**__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase) _A : int = model(**__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase).to_tuple() def recursive_check(__lowerCamelCase , __lowerCamelCase): if isinstance(__lowerCamelCase , (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(__lowerCamelCase , __lowerCamelCase): recursive_check(__lowerCamelCase , __lowerCamelCase) elif isinstance(__lowerCamelCase , __lowerCamelCase): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values()): recursive_check(__lowerCamelCase , __lowerCamelCase) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(__lowerCamelCase) , set_nan_tensor_to_zero(__lowerCamelCase) , atol=1e-5) , msg=( "Tuple and dict output are not equal. Difference:" F" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:" F" {torch.isnan(__lowerCamelCase).any()} and `inf`: {torch.isinf(__lowerCamelCase)}. Dict has" F" `nan`: {torch.isnan(__lowerCamelCase).any()} and `inf`: {torch.isinf(__lowerCamelCase)}." ) , ) recursive_check(__lowerCamelCase , __lowerCamelCase) for model_class in self.all_model_classes: _A : List[Any] = model_class(__lowerCamelCase) model.to(__lowerCamelCase) model.eval() _A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase) _A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase) _A : Any = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase) _A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase) _A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase) _A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True}) _A : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase) _A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True}) @require_torch class lowerCAmelCase__ ( unittest.TestCase , a): '''simple docstring''' __SCREAMING_SNAKE_CASE = (MaskFormerSwinBackbone,) if is_torch_available() else () __SCREAMING_SNAKE_CASE = MaskFormerSwinConfig def _lowerCamelCase ( self) -> Optional[Any]: _A : Tuple = MaskFormerSwinModelTester(self) def _lowerCamelCase ( self) -> Optional[Any]: _A , _A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() _A : Union[str, Any] = inputs_dict["pixel_values"].shape[0] for backbone_class in self.all_model_classes: _A : Optional[Any] = backbone_class(__lowerCamelCase) backbone.to(__lowerCamelCase) backbone.eval() _A : List[Any] = 
backbone(**__lowerCamelCase) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , __lowerCamelCase) self.assertTrue(len(outputs.feature_maps) == len(backbone.channels)) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels)) self.assertIsNone(outputs.hidden_states) self.assertIsNone(outputs.attentions) # Test output_hidden_states=True _A : List[str] = backbone(**__lowerCamelCase , output_hidden_states=__lowerCamelCase) self.assertIsNotNone(outputs.hidden_states) self.assertTrue(len(outputs.hidden_states) , len(backbone.stage_names)) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) _A , _A , _A : List[str] = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels)) # Test output_attentions=True if self.has_attentions: _A : int = backbone(**__lowerCamelCase , output_attentions=__lowerCamelCase) self.assertIsNotNone(outputs.attentions)
11
0
"""simple docstring""" from math import pi, sqrt, tan def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' if side_length < 0: raise ValueError("surface_area_cube() only accepts non-negative values" ) return 6 * side_length**2 def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' if length < 0 or breadth < 0 or height < 0: raise ValueError("surface_area_cuboid() only accepts non-negative values" ) return 2 * ((length * breadth) + (breadth * height) + (length * height)) def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' if radius < 0: raise ValueError("surface_area_sphere() only accepts non-negative values" ) return 4 * pi * radius**2 def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' if radius < 0: raise ValueError("surface_area_hemisphere() only accepts non-negative values" ) return 3 * pi * radius**2 def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' if radius < 0 or height < 0: raise ValueError("surface_area_cone() only accepts non-negative values" ) return pi * radius * (radius + (height**2 + radius**2) ** 0.5) def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' if radius_a < 0 or radius_a < 0 or height < 0: raise ValueError( "surface_area_conical_frustum() only accepts non-negative values" ) __lowerCAmelCase = (height**2 + (radius_a - radius_a) ** 2) ** 0.5 return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2) def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' if radius < 0 or height < 0: raise ValueError("surface_area_cylinder() only accepts non-negative values" ) return 2 * pi * radius * (height + radius) def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' if torus_radius < 0 or tube_radius < 0: raise ValueError("surface_area_torus() only accepts non-negative values" ) if torus_radius < tube_radius: raise ValueError( "surface_area_torus() does not support spindle or self intersecting tori" ) return 4 * pow(UpperCamelCase__ , 2 ) * torus_radius * tube_radius def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' if length < 0 or width < 0: raise ValueError("area_rectangle() only accepts non-negative values" ) return length * width def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' if side_length < 0: raise ValueError("area_square() only accepts non-negative values" ) return side_length**2 def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' if base < 0 or height < 0: raise ValueError("area_triangle() only accepts non-negative values" ) return (base * height) / 2 def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' if sidea < 0 or sidea < 0 or sidea < 0: raise ValueError("area_triangle_three_sides() only accepts non-negative values" ) elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea: raise ValueError("Given three sides do not form a triangle" ) __lowerCAmelCase = (sidea + sidea + sidea) / 2 __lowerCAmelCase = sqrt( semi_perimeter * (semi_perimeter - sidea) * (semi_perimeter - sidea) * (semi_perimeter - sidea) ) return area def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' if base < 0 or height < 0: raise ValueError("area_parallelogram() only accepts non-negative values" ) return base * height def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase 
): '''simple docstring''' if basea < 0 or basea < 0 or height < 0: raise ValueError("area_trapezium() only accepts non-negative values" ) return 1 / 2 * (basea + basea) * height def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' if radius < 0: raise ValueError("area_circle() only accepts non-negative values" ) return pi * radius**2 def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' if radius_x < 0 or radius_y < 0: raise ValueError("area_ellipse() only accepts non-negative values" ) return pi * radius_x * radius_y def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' if diagonal_a < 0 or diagonal_a < 0: raise ValueError("area_rhombus() only accepts non-negative values" ) return 1 / 2 * diagonal_a * diagonal_a def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or sides < 3: raise ValueError( "area_reg_polygon() only accepts integers greater than or \ equal to three as number of sides" ) elif length < 0: raise ValueError( "area_reg_polygon() only accepts non-negative values as \ length of a side" ) return (sides * length**2) / (4 * tan(pi / sides )) return (sides * length**2) / (4 * tan(pi / sides )) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) # verbose so we can see methods missing tests print("[DEMO] Areas of various geometric shapes: \n") print(f'''Rectangle: {area_rectangle(1_0, 2_0) = }''') print(f'''Square: {area_square(1_0) = }''') print(f'''Triangle: {area_triangle(1_0, 1_0) = }''') print(f'''Triangle: {area_triangle_three_sides(5, 1_2, 1_3) = }''') print(f'''Parallelogram: {area_parallelogram(1_0, 2_0) = }''') print(f'''Rhombus: {area_rhombus(1_0, 2_0) = }''') print(f'''Trapezium: {area_trapezium(1_0, 2_0, 3_0) = }''') print(f'''Circle: {area_circle(2_0) = }''') print(f'''Ellipse: {area_ellipse(1_0, 2_0) = }''') print("\nSurface Areas of various geometric shapes: \n") print(f'''Cube: {surface_area_cube(2_0) = }''') print(f'''Cuboid: {surface_area_cuboid(1_0, 2_0, 3_0) = }''') print(f'''Sphere: {surface_area_sphere(2_0) = }''') print(f'''Hemisphere: {surface_area_hemisphere(2_0) = }''') print(f'''Cone: {surface_area_cone(1_0, 2_0) = }''') print(f'''Conical Frustum: {surface_area_conical_frustum(1_0, 2_0, 3_0) = }''') print(f'''Cylinder: {surface_area_cylinder(1_0, 2_0) = }''') print(f'''Torus: {surface_area_torus(2_0, 1_0) = }''') print(f'''Equilateral Triangle: {area_reg_polygon(3, 1_0) = }''') print(f'''Square: {area_reg_polygon(4, 1_0) = }''') print(f'''Reqular Pentagon: {area_reg_polygon(5, 1_0) = }''')
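A quick worked check of Heron's formula as used in area_triangle_three_sides above: for the 3-4-5 right triangle the semi-perimeter is 6 and sqrt(6 * 3 * 2 * 1) = 6, which matches base * height / 2.

from math import sqrt

side_a, side_b, side_c = 3, 4, 5
semi = (side_a + side_b + side_c) / 2
heron_area = sqrt(semi * (semi - side_a) * (semi - side_b) * (semi - side_c))
print(heron_area, (side_a * side_b) / 2)   # 6.0 6.0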
57
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available, ) lowerCAmelCase__ = { 'configuration_speecht5': [ 'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP', 'SpeechT5Config', 'SpeechT5HifiGanConfig', ], 'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'], 'processing_speecht5': ['SpeechT5Processor'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['SpeechT5Tokenizer'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ 'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST', 'SpeechT5ForSpeechToText', 'SpeechT5ForSpeechToSpeech', 'SpeechT5ForTextToSpeech', 'SpeechT5Model', 'SpeechT5PreTrainedModel', 'SpeechT5HifiGan', ] if TYPE_CHECKING: from .configuration_speechta import ( SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP, SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, SpeechTaConfig, SpeechTaHifiGanConfig, ) from .feature_extraction_speechta import SpeechTaFeatureExtractor from .processing_speechta import SpeechTaProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speechta import SpeechTaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speechta import ( SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaModel, SpeechTaPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
11
0
"""simple docstring""" import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def _lowercase ( __lowerCAmelCase , __lowerCAmelCase=0.999 , __lowerCAmelCase="cosine" , ) -> Optional[Any]: if alpha_transform_type == "cosine": def alpha_bar_fn(__lowerCAmelCase ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(__lowerCAmelCase ): return math.exp(t * -12.0 ) else: raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' ) SCREAMING_SNAKE_CASE__ : Optional[int] = [] for i in range(UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ : int = i / num_diffusion_timesteps SCREAMING_SNAKE_CASE__ : str = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(UpperCamelCase__ ) / alpha_bar_fn(UpperCamelCase__ ) , UpperCamelCase__ ) ) return torch.tensor(UpperCamelCase__ , dtype=torch.floataa ) class __a (UpperCamelCase_ , UpperCamelCase_): '''simple docstring''' _SCREAMING_SNAKE_CASE :Tuple = [e.name for e in KarrasDiffusionSchedulers] _SCREAMING_SNAKE_CASE :Union[str, Any] = 2 @register_to_config def __init__( self , _a = 1_000 , _a = 0.00_085 , _a = 0.012 , _a = "linear" , _a = None , _a = "epsilon" , _a = "linspace" , _a = 0 , ) -> Optional[int]: """simple docstring""" if trained_betas is not None: SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor(__lowerCamelCase , dtype=torch.floataa ) elif beta_schedule == "linear": SCREAMING_SNAKE_CASE__ : int = torch.linspace(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. SCREAMING_SNAKE_CASE__ : int = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , __lowerCamelCase , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule SCREAMING_SNAKE_CASE__ : Dict = betas_for_alpha_bar(__lowerCamelCase ) else: raise NotImplementedError(f'''{beta_schedule} does is not implemented for {self.__class__}''' ) SCREAMING_SNAKE_CASE__ : List[Any] = 1.0 - self.betas SCREAMING_SNAKE_CASE__ : Tuple = torch.cumprod(self.alphas , dim=0 ) # set all values self.set_timesteps(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) def _a ( self , _a , _a=None ) -> List[Any]: """simple docstring""" if schedule_timesteps is None: SCREAMING_SNAKE_CASE__ : List[str] = self.timesteps SCREAMING_SNAKE_CASE__ : str = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter ) == 0: SCREAMING_SNAKE_CASE__ : Any = 1 if len(__lowerCamelCase ) > 1 else 0 else: SCREAMING_SNAKE_CASE__ : List[Any] = timestep.cpu().item() if torch.is_tensor(__lowerCamelCase ) else timestep SCREAMING_SNAKE_CASE__ : List[Any] = self._index_counter[timestep_int] return indices[pos].item() @property def _a ( self ) -> Union[str, Any]: """simple docstring""" if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def _a ( self , _a , _a , ) -> torch.FloatTensor: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = self.index_for_timestep(__lowerCamelCase ) if self.state_in_first_order: SCREAMING_SNAKE_CASE__ : Tuple = self.sigmas[step_index] else: SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.sigmas_interpol[step_index] SCREAMING_SNAKE_CASE__ : Dict = sample / ((sigma**2 + 1) ** 0.5) return sample def _a ( self , _a , _a = None , _a = None , ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = num_inference_steps SCREAMING_SNAKE_CASE__ : Optional[Any] = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": SCREAMING_SNAKE_CASE__ : str = np.linspace(0 , num_train_timesteps - 1 , __lowerCamelCase , dtype=__lowerCamelCase )[::-1].copy() elif self.config.timestep_spacing == "leading": SCREAMING_SNAKE_CASE__ : Optional[int] = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 SCREAMING_SNAKE_CASE__ : List[Any] = (np.arange(0 , __lowerCamelCase ) * step_ratio).round()[::-1].copy().astype(__lowerCamelCase ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": SCREAMING_SNAKE_CASE__ : int = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 SCREAMING_SNAKE_CASE__ : str = (np.arange(__lowerCamelCase , 0 , -step_ratio )).round().copy().astype(__lowerCamelCase ) timesteps -= 1 else: raise ValueError( f'''{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' ) SCREAMING_SNAKE_CASE__ : Optional[Any] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) SCREAMING_SNAKE_CASE__ : str = torch.from_numpy(np.log(__lowerCamelCase ) ).to(__lowerCamelCase ) SCREAMING_SNAKE_CASE__ : str = np.interp(__lowerCamelCase , np.arange(0 , len(__lowerCamelCase ) ) , __lowerCamelCase ) SCREAMING_SNAKE_CASE__ : Optional[Any] = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) SCREAMING_SNAKE_CASE__ : Dict = torch.from_numpy(__lowerCamelCase ).to(device=__lowerCamelCase ) # interpolate sigmas SCREAMING_SNAKE_CASE__ : Union[str, Any] = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp() SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.cat( [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] ) if str(__lowerCamelCase ).startswith("""mps""" ): # mps does not support float64 SCREAMING_SNAKE_CASE__ : str = torch.from_numpy(__lowerCamelCase ).to(__lowerCamelCase , dtype=torch.floataa ) else: SCREAMING_SNAKE_CASE__ : int = torch.from_numpy(__lowerCamelCase ).to(__lowerCamelCase ) # interpolate timesteps SCREAMING_SNAKE_CASE__ : Dict = self.sigma_to_t(__lowerCamelCase ).to(__lowerCamelCase , dtype=timesteps.dtype ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten() SCREAMING_SNAKE_CASE__ : int = torch.cat([timesteps[:1], interleaved_timesteps] ) SCREAMING_SNAKE_CASE__ : List[str] = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter SCREAMING_SNAKE_CASE__ : str = defaultdict(__lowerCamelCase ) def _a ( self , _a ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = sigma.log() # get distribution SCREAMING_SNAKE_CASE__ : List[str] = log_sigma - self.log_sigmas[:, None] # get sigmas range SCREAMING_SNAKE_CASE__ : Union[str, Any] = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 ) SCREAMING_SNAKE_CASE__ : Optional[Any] = low_idx + 1 SCREAMING_SNAKE_CASE__ : List[str] = self.log_sigmas[low_idx] SCREAMING_SNAKE_CASE__ : Any = self.log_sigmas[high_idx] # interpolate sigmas SCREAMING_SNAKE_CASE__ : Optional[Any] = (low - log_sigma) / (low - high) SCREAMING_SNAKE_CASE__ : Dict = w.clamp(0 , 1 ) # transform interpolation to time range SCREAMING_SNAKE_CASE__ : List[Any] = (1 - w) * low_idx + w * high_idx SCREAMING_SNAKE_CASE__ : Any = t.view(sigma.shape ) return t @property def _a ( self ) -> Any: """simple docstring""" return self.sample is None def _a ( self , _a , _a , _a , _a = True , ) -> Union[SchedulerOutput, Tuple]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = self.index_for_timestep(__lowerCamelCase ) # advance index counter by 1 SCREAMING_SNAKE_CASE__ : Optional[int] = timestep.cpu().item() if torch.is_tensor(__lowerCamelCase ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: SCREAMING_SNAKE_CASE__ : Any = self.sigmas[step_index] SCREAMING_SNAKE_CASE__ : Any = self.sigmas_interpol[step_index + 1] SCREAMING_SNAKE_CASE__ : str = self.sigmas[step_index + 1] else: # 2nd order / KDPM2's method SCREAMING_SNAKE_CASE__ : Any = self.sigmas[step_index - 1] SCREAMING_SNAKE_CASE__ : List[str] = self.sigmas_interpol[step_index] SCREAMING_SNAKE_CASE__ : int = self.sigmas[step_index] # currently only gamma=0 is supported. 
This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API SCREAMING_SNAKE_CASE__ : Dict = 0 SCREAMING_SNAKE_CASE__ : Optional[int] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": SCREAMING_SNAKE_CASE__ : Optional[Any] = sigma_hat if self.state_in_first_order else sigma_interpol SCREAMING_SNAKE_CASE__ : Union[str, Any] = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": SCREAMING_SNAKE_CASE__ : List[Any] = sigma_hat if self.state_in_first_order else sigma_interpol SCREAMING_SNAKE_CASE__ : List[str] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": raise NotImplementedError("""prediction_type not implemented yet: sample""" ) else: raise ValueError( f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order SCREAMING_SNAKE_CASE__ : Tuple = (sample - pred_original_sample) / sigma_hat # 3. delta timestep SCREAMING_SNAKE_CASE__ : Union[str, Any] = sigma_interpol - sigma_hat # store for 2nd order step SCREAMING_SNAKE_CASE__ : Union[str, Any] = sample else: # DPM-Solver-2 # 2. Convert to an ODE derivative for 2nd order SCREAMING_SNAKE_CASE__ : int = (sample - pred_original_sample) / sigma_interpol # 3. delta timestep SCREAMING_SNAKE_CASE__ : Union[str, Any] = sigma_next - sigma_hat SCREAMING_SNAKE_CASE__ : Optional[Any] = self.sample SCREAMING_SNAKE_CASE__ : Any = None SCREAMING_SNAKE_CASE__ : Optional[Any] = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=__lowerCamelCase ) def _a ( self , _a , _a , _a , ) -> torch.FloatTensor: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(__lowerCamelCase ): # mps does not support float64 SCREAMING_SNAKE_CASE__ : Tuple = self.timesteps.to(original_samples.device , dtype=torch.floataa ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = timesteps.to(original_samples.device , dtype=torch.floataa ) else: SCREAMING_SNAKE_CASE__ : Dict = self.timesteps.to(original_samples.device ) SCREAMING_SNAKE_CASE__ : int = timesteps.to(original_samples.device ) SCREAMING_SNAKE_CASE__ : Dict = [self.index_for_timestep(__lowerCamelCase , __lowerCamelCase ) for t in timesteps] SCREAMING_SNAKE_CASE__ : Dict = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): SCREAMING_SNAKE_CASE__ : Dict = sigma.unsqueeze(-1 ) SCREAMING_SNAKE_CASE__ : int = original_samples + noise * sigma return noisy_samples def __len__( self ) -> List[Any]: """simple docstring""" return self.config.num_train_timesteps
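The scheduler above follows the usual k-diffusion sampling loop: scale the noisy sample for the model, predict the noise, and call step() to move to the next sigma, with a second-order correction on alternating calls. Below is a minimal sketch of that loop. It assumes this file corresponds to diffusers' KDPM2DiscreteScheduler (the un-obfuscated upstream class) and uses a zero-output stand-in for the UNet, so it runs end to end without any checkpoint but produces no meaningful image.

import torch
from diffusers import KDPM2DiscreteScheduler  # assumption: un-obfuscated class defined above

scheduler = KDPM2DiscreteScheduler()           # every __init__ argument has a default, as above
scheduler.set_timesteps(25)

sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
dummy_unet = lambda x, t: torch.zeros_like(x)  # stand-in for a real noise-prediction model

for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)   # sample / (sigma**2 + 1) ** 0.5
    noise_pred = dummy_unet(model_input, t)                 # predicted epsilon
    sample = scheduler.step(noise_pred, t, sample).prev_sample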
132
import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def _UpperCAmelCase (UpperCamelCase__ : Union[str, Any] ): _A , _A : Any = image.size _A , _A : str = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 _A : List[str] = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) _A : Any = np.array(UpperCamelCase__ ).astype(np.floataa ) / 2_55.0 _A : Optional[Any] = image[None].transpose(0 , 3 , 1 , 2 ) _A : Union[str, Any] = torch.from_numpy(UpperCamelCase__ ) return 2.0 * image - 1.0 class lowerCAmelCase__ ( a): '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> Optional[int]: super().__init__() self.register_modules(vqvae=__lowerCamelCase , unet=__lowerCamelCase , scheduler=__lowerCamelCase) @torch.no_grad() def __call__( self , __lowerCamelCase = None , __lowerCamelCase = 1 , __lowerCamelCase = 1_0_0 , __lowerCamelCase = 0.0 , __lowerCamelCase = None , __lowerCamelCase = "pil" , __lowerCamelCase = True , ) -> Union[Tuple, ImagePipelineOutput]: if isinstance(__lowerCamelCase , PIL.Image.Image): _A : Tuple = 1 elif isinstance(__lowerCamelCase , torch.Tensor): _A : Union[str, Any] = image.shape[0] else: raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(__lowerCamelCase)}") if isinstance(__lowerCamelCase , PIL.Image.Image): _A : Union[str, Any] = preprocess(__lowerCamelCase) _A , _A : Union[str, Any] = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image _A : Optional[Any] = (batch_size, self.unet.config.in_channels // 2, height, width) _A : str = next(self.unet.parameters()).dtype _A : Union[str, Any] = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=self.device , dtype=__lowerCamelCase) _A : List[Any] = image.to(device=self.device , dtype=__lowerCamelCase) # set timesteps and move to the correct device self.scheduler.set_timesteps(__lowerCamelCase , device=self.device) _A : Any = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler _A : List[str] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] _A : str = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) _A : Optional[int] = {} if accepts_eta: _A : List[Any] = eta for t in self.progress_bar(__lowerCamelCase): # concat latents and low resolution image in the channel dimension. 
_A : List[Any] = torch.cat([latents, image] , dim=1) _A : str = self.scheduler.scale_model_input(__lowerCamelCase , __lowerCamelCase) # predict the noise residual _A : Any = self.unet(__lowerCamelCase , __lowerCamelCase).sample # compute the previous noisy sample x_t -> x_t-1 _A : Optional[int] = self.scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase).prev_sample # decode the image latents with the VQVAE _A : Union[str, Any] = self.vqvae.decode(__lowerCamelCase).sample _A : Dict = torch.clamp(__lowerCamelCase , -1.0 , 1.0) _A : Tuple = image / 2 + 0.5 _A : int = image.cpu().permute(0 , 2 , 3 , 1).numpy() if output_type == "pil": _A : Optional[int] = self.numpy_to_pil(__lowerCamelCase) if not return_dict: return (image,) return ImagePipelineOutput(images=__lowerCamelCase)
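The pipeline above is the latent-diffusion super-resolution recipe: the latents are denoised while concatenated with the low-resolution conditioning image, then decoded by the VQ-VAE. A minimal usage sketch follows; it assumes the un-obfuscated upstream class is diffusers' LDMSuperResolutionPipeline and that the CompVis/ldm-super-resolution-4x-openimages checkpoint is available — both are assumptions, not stated in this file.

from PIL import Image
from diffusers import LDMSuperResolutionPipeline   # assumption: un-obfuscated class for the code above

# assumption: public 4x checkpoint; any vqvae+unet+scheduler trio saved in this layout would work
pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")

low_res = Image.new("RGB", (128, 128))              # stand-in for a real low-resolution photo
out = pipe(image=low_res, num_inference_steps=100, eta=1.0)
out.images[0].save("upscaled.png")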
11
0
class __lowerCAmelCase : """simple docstring""" def __init__( self : Optional[int] ): __lowercase : Union[str, Any] = {} def snake_case_ ( self : List[str] ): print(self.vertex ) for i in self.vertex: print(__lowerCamelCase , ''' -> ''' , ''' -> '''.join([str(__lowerCamelCase ) for j in self.vertex[i]] ) ) def snake_case_ ( self : int , _snake_case : Union[str, Any] , _snake_case : Tuple ): # check if vertex is already present, if from_vertex in self.vertex: self.vertex[from_vertex].append(__lowerCamelCase ) else: # else make a new vertex __lowercase : Optional[Any] = [to_vertex] def snake_case_ ( self : Optional[int] ): # visited array for storing already visited nodes __lowercase : List[Any] = [False] * len(self.vertex ) # call the recursive helper function for i in range(len(self.vertex ) ): if not visited[i]: self.dfs_recursive(__lowerCamelCase , __lowerCamelCase ) def snake_case_ ( self : Any , _snake_case : Union[str, Any] , _snake_case : List[Any] ): # mark start vertex as visited __lowercase : str = True print(__lowerCamelCase , end=''' ''' ) # Recur for all the vertices that are adjacent to this node for i in self.vertex: if not visited[i]: self.dfs_recursive(__lowerCamelCase , __lowerCamelCase ) if __name__ == "__main__": __lowerCAmelCase : Optional[Any] = Graph() g.add_edge(0, 1) g.add_edge(0, 2) g.add_edge(1, 2) g.add_edge(2, 0) g.add_edge(2, 3) g.add_edge(3, 3) g.print_graph() print("DFS:") g.dfs() # OUTPUT: # 0 -> 1 -> 2 # 1 -> 2 # 2 -> 0 -> 3 # 3 -> 3 # DFS: # 0 1 2 3
156
import unittest import torch from diffusers import VQModel from diffusers.utils import floats_tensor, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class lowerCAmelCase__ ( a , a , unittest.TestCase): '''simple docstring''' __SCREAMING_SNAKE_CASE = VQModel __SCREAMING_SNAKE_CASE = "sample" @property def _lowerCamelCase ( self , __lowerCamelCase=(3_2, 3_2)) -> Optional[Any]: _A : Optional[int] = 4 _A : Tuple = 3 _A : List[Any] = floats_tensor((batch_size, num_channels) + sizes).to(__lowerCamelCase) return {"sample": image} @property def _lowerCamelCase ( self) -> int: return (3, 3_2, 3_2) @property def _lowerCamelCase ( self) -> List[Any]: return (3, 3_2, 3_2) def _lowerCamelCase ( self) -> Union[str, Any]: _A : List[Any] = { "block_out_channels": [3_2, 6_4], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 3, } _A : int = self.dummy_input return init_dict, inputs_dict def _lowerCamelCase ( self) -> Union[str, Any]: pass def _lowerCamelCase ( self) -> Any: pass def _lowerCamelCase ( self) -> Any: _A , _A : List[Any] = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=__lowerCamelCase) self.assertIsNotNone(__lowerCamelCase) self.assertEqual(len(loading_info["missing_keys"]) , 0) model.to(__lowerCamelCase) _A : str = model(**self.dummy_input) assert image is not None, "Make sure output is not None" def _lowerCamelCase ( self) -> Union[str, Any]: _A : Optional[Any] = VQModel.from_pretrained("fusing/vqgan-dummy") model.to(__lowerCamelCase).eval() torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) _A : Tuple = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size) _A : Optional[int] = image.to(__lowerCamelCase) with torch.no_grad(): _A : List[str] = model(__lowerCamelCase).sample _A : int = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off _A : Optional[Any] = torch.tensor([-0.0_1_5_3, -0.4_0_4_4, -0.1_8_8_0, -0.5_1_6_1, -0.2_4_1_8, -0.4_0_7_2, -0.1_6_1_2, -0.0_6_3_3, -0.0_1_4_3]) # fmt: on self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3))
11
0
"""simple docstring""" from typing import Dict, Optional import numpy as np import datasets __A = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n" __A = "\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. 
])}\n" __A = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}" def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False , ) -> int: if label_map is not None: for old_id, new_id in label_map.items(): lowercase__: Union[str, Any] = new_id # turn into Numpy arrays lowercase__: str = np.array(UpperCamelCase__ ) lowercase__: List[Any] = np.array(UpperCamelCase__ ) if reduce_labels: lowercase__: str = 2_5_5 lowercase__: Union[str, Any] = label - 1 lowercase__: Optional[int] = 2_5_5 lowercase__: List[Any] = label != ignore_index lowercase__: Any = np.not_equal(UpperCamelCase__ , UpperCamelCase__ ) lowercase__: str = pred_label[mask] lowercase__: Any = np.array(UpperCamelCase__ )[mask] lowercase__: Tuple = pred_label[pred_label == label] lowercase__: int = np.histogram(UpperCamelCase__ , bins=UpperCamelCase__ , range=(0, num_labels - 1) )[0] lowercase__: List[Any] = np.histogram(UpperCamelCase__ , bins=UpperCamelCase__ , range=(0, num_labels - 1) )[0] lowercase__: Optional[int] = np.histogram(UpperCamelCase__ , bins=UpperCamelCase__ , range=(0, num_labels - 1) )[0] lowercase__: Tuple = area_pred_label + area_label - area_intersect return area_intersect, area_union, area_pred_label, area_label def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False , ) -> Union[str, Any]: lowercase__: List[Any] = np.zeros((num_labels,) , dtype=np.floataa ) lowercase__: int = np.zeros((num_labels,) , dtype=np.floataa ) lowercase__: Union[str, Any] = np.zeros((num_labels,) , dtype=np.floataa ) lowercase__: int = np.zeros((num_labels,) , dtype=np.floataa ) for result, gt_seg_map in zip(UpperCamelCase__ , UpperCamelCase__ ): lowercase__: str = intersect_and_union( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) total_area_intersect += area_intersect total_area_union += area_union total_area_pred_label += area_pred_label total_area_label += area_label return total_area_intersect, total_area_union, total_area_pred_label, total_area_label def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = False , ) -> List[str]: lowercase__: Optional[int] = total_intersect_and_union( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # compute metrics lowercase__: List[str] = {} lowercase__: Any = total_area_intersect.sum() / total_area_label.sum() lowercase__: Union[str, Any] = total_area_intersect / total_area_union lowercase__: Union[str, Any] = total_area_intersect / total_area_label lowercase__: Union[str, Any] = np.nanmean(UpperCamelCase__ ) lowercase__: List[Any] = np.nanmean(UpperCamelCase__ ) lowercase__: List[str] = all_acc lowercase__: Tuple = iou lowercase__: Tuple = acc if nan_to_num is not None: lowercase__: List[Any] = {metric: np.nan_to_num(UpperCamelCase__ , nan=UpperCamelCase__ ) for metric, metric_value in metrics.items()} return metrics @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class 
UpperCAmelCase (datasets.Metric ): """simple docstring""" def _snake_case ( self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( # 1st Seq - height dim, 2nd - width dim { '''predictions''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ), '''references''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ), } ) , reference_urls=[ '''https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py''' ] , ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = False , ): lowercase__: int = mean_iou( results=__lowerCamelCase , gt_seg_maps=__lowerCamelCase , num_labels=__lowerCamelCase , ignore_index=__lowerCamelCase , nan_to_num=__lowerCamelCase , label_map=__lowerCamelCase , reduce_labels=__lowerCamelCase , ) return iou_result
177
import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: lowerCAmelCase__ = None lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} lowerCAmelCase__ = { 'vocab_file': { 'facebook/mbart-large-en-ro': ( 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model' ), 'facebook/mbart-large-cc25': ( 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model' ), }, 'tokenizer_file': { 'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json', 'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json', }, } lowerCAmelCase__ = { 'facebook/mbart-large-en-ro': 10_24, 'facebook/mbart-large-cc25': 10_24, } # fmt: off lowerCAmelCase__ = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN'] class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"] __SCREAMING_SNAKE_CASE = MBartTokenizer __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="<s>" , __lowerCamelCase="</s>" , __lowerCamelCase="</s>" , __lowerCamelCase="<s>" , __lowerCamelCase="<unk>" , __lowerCamelCase="<pad>" , __lowerCamelCase="<mask>" , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , **__lowerCamelCase , ) -> Optional[int]: # Mask token behave like a normal word, i.e. include the space before it _A : List[str] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase) else mask_token super().__init__( vocab_file=__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , **__lowerCamelCase , ) _A : Union[str, Any] = vocab_file _A : int = False if not self.vocab_file else True _A : Optional[int] = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens]) self.add_special_tokens({"additional_special_tokens": _additional_special_tokens}) _A : Union[str, Any] = { lang_code: self.convert_tokens_to_ids(__lowerCamelCase) for lang_code in FAIRSEQ_LANGUAGE_CODES } _A : Optional[int] = src_lang if src_lang is not None else "en_XX" _A : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang) _A : int = tgt_lang self.set_src_lang_special_tokens(self._src_lang) @property def _lowerCamelCase ( self) -> str: return self._src_lang @src_lang.setter def _lowerCamelCase ( self , __lowerCamelCase) -> None: _A : Dict = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]: _A : List[str] = [self.sep_token_id] _A : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) -> Dict: if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model") _A : str = src_lang _A : Any = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase) _A : Tuple = self.convert_tokens_to_ids(__lowerCamelCase) _A : Dict = tgt_lang_id return inputs def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = "en_XX" , __lowerCamelCase = None , __lowerCamelCase = "ro_RO" , **__lowerCamelCase , ) -> BatchEncoding: _A : Any = src_lang _A : int = tgt_lang return super().prepare_seqaseq_batch(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self) -> List[str]: return self.set_src_lang_special_tokens(self.src_lang) def _lowerCamelCase ( self) -> List[Any]: return self.set_tgt_lang_special_tokens(self.tgt_lang) def _lowerCamelCase ( self , __lowerCamelCase) -> None: _A : int = self.convert_tokens_to_ids(__lowerCamelCase) _A : int = [] _A : List[str] = [self.eos_token_id, self.cur_lang_code] _A : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens) _A : str = self.convert_ids_to_tokens(self.suffix_tokens) _A : List[Any] = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , ) def _lowerCamelCase ( self , __lowerCamelCase) -> None: _A : Optional[int] = self.convert_tokens_to_ids(__lowerCamelCase) _A : List[Any] = [] _A : str = [self.eos_token_id, self.cur_lang_code] _A : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens) _A : int = self.convert_ids_to_tokens(self.suffix_tokens) _A : str = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , ) def 
_lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer.") if not os.path.isdir(__lowerCamelCase): logger.error(F"Vocabulary path ({save_directory}) should be a directory.") return _A : int = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(__lowerCamelCase): copyfile(self.vocab_file , __lowerCamelCase) return (out_vocab_file,)
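In this tokenizer the language handling lives entirely in the set_src_lang_special_tokens / set_tgt_lang_special_tokens methods above: the prefix token list is empty and the suffix is [</s>, lang_code]. A short sketch of what that looks like in practice, written against the un-obfuscated transformers class MBartTokenizerFast and the facebook/mbart-large-en-ro checkpoint already referenced in this file; the exact printed tokens are an assumption about that vocabulary.

from transformers import MBartTokenizerFast   # assumption: un-obfuscated class for the code above

tok = MBartTokenizerFast.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
ids = tok("UN Chief Says There Is No Military Solution in Syria")["input_ids"]

# The encoded sentence ends with the suffix tokens installed above: </s> then the language code.
print(tok.convert_ids_to_tokens(ids)[-2:])   # expected: ['</s>', 'en_XX']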
11
0
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_albert import AlbertTokenizer else: SCREAMING_SNAKE_CASE :List[str] = None SCREAMING_SNAKE_CASE :List[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE :str = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} SCREAMING_SNAKE_CASE :int = { 'vocab_file': { 'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model', 'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model', 'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model', 'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model', 'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model', 'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model', 'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model', 'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model', }, 'tokenizer_file': { 'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json', 'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json', 'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json', 'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json', 'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json', 'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json', 'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json', 'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json', }, } SCREAMING_SNAKE_CASE :Dict = { 'albert-base-v1': 512, 'albert-large-v1': 512, 'albert-xlarge-v1': 512, 'albert-xxlarge-v1': 512, 'albert-base-v2': 512, 'albert-large-v2': 512, 'albert-xlarge-v2': 512, 'albert-xxlarge-v2': 512, } SCREAMING_SNAKE_CASE :Dict = '▁' class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = AlbertTokenizer def __init__( self : Dict ,A : Optional[int]=None ,A : Dict=None ,A : int=True ,A : Any=True ,A : List[str]=False ,A : str="[CLS]" ,A : Any="[SEP]" ,A : Dict="<unk>" ,A : int="[SEP]" ,A : int="<pad>" ,A : str="[CLS]" ,A : Tuple="[MASK]" ,**A : Optional[int] ,): # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. 
__A = ( AddedToken(__lowerCamelCase ,lstrip=__lowerCamelCase ,rstrip=__lowerCamelCase ,normalized=__lowerCamelCase ) if isinstance(__lowerCamelCase ,__lowerCamelCase ) else mask_token ) super().__init__( __lowerCamelCase ,tokenizer_file=__lowerCamelCase ,do_lower_case=__lowerCamelCase ,remove_space=__lowerCamelCase ,keep_accents=__lowerCamelCase ,bos_token=__lowerCamelCase ,eos_token=__lowerCamelCase ,unk_token=__lowerCamelCase ,sep_token=__lowerCamelCase ,pad_token=__lowerCamelCase ,cls_token=__lowerCamelCase ,mask_token=__lowerCamelCase ,**__lowerCamelCase ,) __A = do_lower_case __A = remove_space __A = keep_accents __A = vocab_file __A = False if not self.vocab_file else True def UpperCamelCase_ ( self : List[Any] ,A : Optional[Any] ,A : int = None ): __A = [self.sep_token_id] __A = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def UpperCamelCase_ ( self : str ,A : int ,A : str = None ): __A = [self.sep_token_id] __A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCamelCase_ ( self : Optional[int] ,A : Optional[Any] ,A : Tuple = None ): if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(__lowerCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __A = os.path.join( __lowerCamelCase ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ): copyfile(self.vocab_file ,__lowerCamelCase ) return (out_vocab_file,)
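For reference, the special-token layout built by build_inputs_with_special_tokens above ([CLS] ... [SEP]) can be checked directly. The sketch assumes the un-obfuscated class is transformers' AlbertTokenizerFast and uses the albert-base-v2 checkpoint listed in the vocab map above.

from transformers import AlbertTokenizerFast   # assumption: un-obfuscated class for the code above

tok = AlbertTokenizerFast.from_pretrained("albert-base-v2")
ids = tok("hello world")["input_ids"]
tokens = tok.convert_ids_to_tokens(ids)
print(tokens[0], tokens[-1])   # [CLS] ... [SEP], matching build_inputs_with_special_tokens above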
15
import json import os from typing import Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { 'vocab_file': 'vocab.json', 'tokenizer_config_file': 'tokenizer_config.json', 'merges_file': 'merges.txt', } lowerCAmelCase__ = { 'vocab_file': { 'facebook/s2t-wav2vec2-large-en-de': ( 'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json' ), }, 'tokenizer_config_file': { 'facebook/s2t-wav2vec2-large-en-de': ( 'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json' ), }, 'merges_file': { 'facebook/s2t-wav2vec2-large-en-de': ( 'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt' ), }, } lowerCAmelCase__ = '</w>' lowerCAmelCase__ = '@@ ' def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] ): _A : Optional[int] = set() _A : Optional[Any] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _A : List[Any] = char return pairs # Speech2Text2 has no max input length lowerCAmelCase__ = {'facebook/s2t-wav2vec2-large-en-de': 10_24} class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"] def __init__( self , __lowerCamelCase , __lowerCamelCase="<s>" , __lowerCamelCase="<pad>" , __lowerCamelCase="</s>" , __lowerCamelCase="<unk>" , __lowerCamelCase=False , __lowerCamelCase=None , **__lowerCamelCase , ) -> Optional[Any]: super().__init__( unk_token=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , pad_token=__lowerCamelCase , do_lower_case=__lowerCamelCase , **__lowerCamelCase , ) _A : Dict = do_lower_case with open(__lowerCamelCase , encoding="utf-8") as vocab_handle: _A : Optional[int] = json.load(__lowerCamelCase) _A : Optional[Any] = {v: k for k, v in self.encoder.items()} if merges_file is None: logger.info(F"No merges files provided. 
{self.__class__.__name__} can only be used for decoding.") _A : Optional[Any] = None _A : Tuple = None else: with open(__lowerCamelCase , encoding="utf-8") as merges_handle: _A : Optional[int] = merges_handle.read().split("\n")[:-1] _A : Union[str, Any] = [tuple(merge.split()[:2]) for merge in merges] _A : Optional[int] = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase)))) _A : List[Any] = {} @property def _lowerCamelCase ( self) -> int: return len(self.decoder) def _lowerCamelCase ( self) -> Dict: return dict(self.encoder , **self.added_tokens_encoder) def _lowerCamelCase ( self , __lowerCamelCase) -> Dict: _A : Tuple = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,) if token in self.cache: return self.cache[token] _A : int = get_pairs(__lowerCamelCase) if not pairs: return token while True: _A : Any = min(__lowerCamelCase , key=lambda __lowerCamelCase: self.bpe_ranks.get(__lowerCamelCase , float("inf"))) if bigram not in self.bpe_ranks: break _A , _A : Optional[int] = bigram _A : int = [] _A : str = 0 while i < len(__lowerCamelCase): try: _A : str = word.index(__lowerCamelCase , __lowerCamelCase) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) _A : str = j if word[i] == first and i < len(__lowerCamelCase) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 _A : List[str] = tuple(__lowerCamelCase) _A : List[str] = new_word if len(__lowerCamelCase) == 1: break else: _A : List[Any] = get_pairs(__lowerCamelCase) _A : Tuple = " ".join(__lowerCamelCase) if word == "\n " + BPE_TOKEN_MERGES: _A : List[str] = "\n" + BPE_TOKEN_MERGES if word.endswith(__lowerCamelCase): _A : int = word.replace(__lowerCamelCase , "") _A : int = word.replace(" " , __lowerCamelCase) _A : Union[str, Any] = word return word def _lowerCamelCase ( self , __lowerCamelCase) -> Optional[Any]: if self.bpe_ranks is None: raise ValueError( "This tokenizer was instantiated without a `merges.txt` file, so" " that it can only be used for decoding, not for encoding." 
"Make sure to provide `merges.txt` file at instantiation to enable " "encoding.") if self.do_lower_case: _A : List[Any] = text.lower() _A : Optional[int] = text.split() _A : List[str] = [] for token in text: if token: split_tokens.extend(list(self.bpe(__lowerCamelCase).split(" "))) return split_tokens def _lowerCamelCase ( self , __lowerCamelCase) -> int: return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token)) def _lowerCamelCase ( self , __lowerCamelCase) -> str: _A : List[str] = self.decoder.get(__lowerCamelCase , self.unk_token) return result def _lowerCamelCase ( self , __lowerCamelCase) -> str: _A : str = " ".join(__lowerCamelCase) # make sure @@ tokens are concatenated _A : int = "".join(string.split(__lowerCamelCase)) return string def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]: if not os.path.isdir(__lowerCamelCase): logger.error(F"Vocabulary path ({save_directory}) should be a directory") return _A : Any = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) _A : Any = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]) with open(__lowerCamelCase , "w" , encoding="utf-8") as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase) + "\n") _A : Union[str, Any] = 0 if self.bpe_ranks is None: return (vocab_file,) with open(__lowerCamelCase , "w" , encoding="utf-8") as writer: for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase: kv[1]): if index != token_index: logger.warning( F"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!") _A : Optional[int] = token_index writer.write(" ".join(__lowerCamelCase) + "\n") index += 1 return (vocab_file, merges_file)
11
0
import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): __lowerCamelCase : Union[str, Any] =['image_processor', 'tokenizer'] __lowerCamelCase : int ='OwlViTImageProcessor' __lowerCamelCase : Tuple =('CLIPTokenizer', 'CLIPTokenizerFast') def __init__( self : Dict , __lowercase : Any=None , __lowercase : Optional[Any]=None , **__lowercase : Union[str, Any] ): '''simple docstring''' __a = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , __lowerCamelCase , ) __a = kwargs.pop("""feature_extractor""" ) __a = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(__lowerCamelCase , __lowerCamelCase ) def __call__( self : int , __lowercase : int=None , __lowercase : List[str]=None , __lowercase : Union[str, Any]=None , __lowercase : Any="max_length" , __lowercase : Tuple="np" , **__lowercase : int ): '''simple docstring''' if text is None and query_images is None and images is None: raise ValueError( """You have to specify at least one text or query image or image. All three cannot be none.""" ) if text is not None: if isinstance(__lowerCamelCase , __lowerCamelCase ) or (isinstance(__lowerCamelCase , __lowerCamelCase ) and not isinstance(text[0] , __lowerCamelCase )): __a = [self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase )] elif isinstance(__lowerCamelCase , __lowerCamelCase ) and isinstance(text[0] , __lowerCamelCase ): __a = [] # Maximum number of queries across batch __a = max([len(__lowerCamelCase ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(__lowerCamelCase ) != max_num_queries: __a = t + [" "] * (max_num_queries - len(__lowerCamelCase )) __a = self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase ) encodings.append(__lowerCamelCase ) else: raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""" ) if return_tensors == "np": __a = np.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 ) __a = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp __a = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 ) __a = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch __a = torch.cat([encoding["""input_ids"""] for encoding in encodings] , dim=0 ) __a = torch.cat([encoding["""attention_mask"""] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf __a = tf.stack([encoding["""input_ids"""] for encoding in encodings] , axis=0 ) __a = tf.stack([encoding["""attention_mask"""] for encoding in encodings] , axis=0 ) else: raise ValueError("""Target return tensor type could not be returned""" ) __a = BatchEncoding() __a = 
input_ids __a = attention_mask if query_images is not None: __a = BatchEncoding() __a = self.image_processor( __lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase ).pixel_values __a = query_pixel_values if images is not None: __a = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase ) if text is not None and images is not None: __a = image_features.pixel_values return encoding elif query_images is not None and images is not None: __a = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**__lowerCamelCase ) , tensor_type=__lowerCamelCase ) def UpperCamelCase_ ( self : Union[str, Any] , *__lowercase : int , **__lowercase : Union[str, Any] ): '''simple docstring''' return self.image_processor.post_process(*__lowerCamelCase , **__lowerCamelCase ) def UpperCamelCase_ ( self : Optional[int] , *__lowercase : int , **__lowercase : Optional[Any] ): '''simple docstring''' return self.image_processor.post_process_object_detection(*__lowerCamelCase , **__lowerCamelCase ) def UpperCamelCase_ ( self : Union[str, Any] , *__lowercase : int , **__lowercase : str ): '''simple docstring''' return self.image_processor.post_process_image_guided_detection(*__lowerCamelCase , **__lowerCamelCase ) def UpperCamelCase_ ( self : Union[str, Any] , *__lowercase : Any , **__lowercase : List[Any] ): '''simple docstring''' return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase ) def UpperCamelCase_ ( self : Any , *__lowercase : Union[str, Any] , **__lowercase : Dict ): '''simple docstring''' return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase ) @property def UpperCamelCase_ ( self : Any ): '''simple docstring''' warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __lowerCamelCase , ) return self.image_processor_class @property def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __lowerCamelCase , ) return self.image_processor
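The processor above fans text queries out per image (padding each image's query list to the longest one) and stacks the encodings in the requested tensor framework alongside the preprocessed pixels. A small sketch, assuming the un-obfuscated transformers class OwlViTProcessor and the google/owlvit-base-patch32 checkpoint (both assumptions, not stated in this file):

from PIL import Image
from transformers import OwlViTProcessor   # assumption: un-obfuscated class for the code above

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")  # assumed checkpoint

image = Image.new("RGB", (768, 768))                 # stand-in for a real photo
texts = [["a photo of a cat", "a photo of a dog"]]   # one list of text queries per image

inputs = processor(text=texts, images=image, return_tensors="pt")
print(inputs["input_ids"].shape, inputs["pixel_values"].shape)   # stacked queries, preprocessed image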
302
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { 'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json', # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae } class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = "vit_mae" def __init__( self , __lowerCamelCase=7_6_8 , __lowerCamelCase=1_2 , __lowerCamelCase=1_2 , __lowerCamelCase=3_0_7_2 , __lowerCamelCase="gelu" , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.0_2 , __lowerCamelCase=1e-12 , __lowerCamelCase=2_2_4 , __lowerCamelCase=1_6 , __lowerCamelCase=3 , __lowerCamelCase=True , __lowerCamelCase=1_6 , __lowerCamelCase=5_1_2 , __lowerCamelCase=8 , __lowerCamelCase=2_0_4_8 , __lowerCamelCase=0.7_5 , __lowerCamelCase=False , **__lowerCamelCase , ) -> int: super().__init__(**__lowerCamelCase) _A : int = hidden_size _A : List[str] = num_hidden_layers _A : List[Any] = num_attention_heads _A : Optional[Any] = intermediate_size _A : Optional[int] = hidden_act _A : List[Any] = hidden_dropout_prob _A : List[Any] = attention_probs_dropout_prob _A : Union[str, Any] = initializer_range _A : str = layer_norm_eps _A : Any = image_size _A : int = patch_size _A : int = num_channels _A : Dict = qkv_bias _A : Tuple = decoder_num_attention_heads _A : Tuple = decoder_hidden_size _A : List[str] = decoder_num_hidden_layers _A : Optional[Any] = decoder_intermediate_size _A : List[str] = mask_ratio _A : Union[str, Any] = norm_pix_loss
11
0
"""simple docstring""" from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union import numpy as np import PIL from PIL import Image from ...utils import BaseOutput, is_torch_available, is_transformers_available @dataclass class lowercase__ ( snake_case__ ): _UpperCAmelCase :Optional[Any] = 42 _UpperCAmelCase :Tuple = 42 if is_transformers_available() and is_torch_available(): from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
144
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available lowerCAmelCase__ = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['SpeechEncoderDecoderModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['FlaxSpeechEncoderDecoderModel'] if TYPE_CHECKING: from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
11
0
import argparse import os import re a__: Any = 'src/transformers' # Pattern that looks at the indentation in a line. a__: str = re.compile(r'^(\s*)\S') # Pattern that matches `"key":" and puts `key` in group 0. a__: Union[str, Any] = re.compile(r'^\s*"([^"]+)":') # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. a__: Dict = re.compile(r'^\s*_import_structure\["([^"]+)"\]') # Pattern that matches `"key",` and puts `key` in group 0. a__: Dict = re.compile(r'^\s*"([^"]+)",\s*$') # Pattern that matches any `[stuff]` and puts `stuff` in group 0. a__: str = re.compile(r'\[([^\]]+)\]') def UpperCamelCase__( UpperCamelCase__ : Union[str, Any] )->Optional[Any]: A__ = _re_indent.search(UpperCamelCase__ ) return "" if search is None else search.groups()[0] def UpperCamelCase__( UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int]="" , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : int=None )->List[str]: A__ = 0 A__ = code.split('''\n''' ) if start_prompt is not None: while not lines[index].startswith(UpperCamelCase__ ): index += 1 A__ = ["\n".join(lines[:index] )] else: A__ = [] # We split into blocks until we get to the `end_prompt` (or the end of the block). A__ = [lines[index]] index += 1 while index < len(UpperCamelCase__ ) and (end_prompt is None or not lines[index].startswith(UpperCamelCase__ )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(UpperCamelCase__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ): current_block.append(lines[index] ) blocks.append('''\n'''.join(UpperCamelCase__ ) ) if index < len(UpperCamelCase__ ) - 1: A__ = [lines[index + 1]] index += 1 else: A__ = [] else: blocks.append('''\n'''.join(UpperCamelCase__ ) ) A__ = [lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(UpperCamelCase__ ) > 0: blocks.append('''\n'''.join(UpperCamelCase__ ) ) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(UpperCamelCase__ ): blocks.append('''\n'''.join(lines[index:] ) ) return blocks def UpperCamelCase__( UpperCamelCase__ : Any )->Any: def _inner(UpperCamelCase__ : str ): return key(UpperCamelCase__ ).lower().replace('''_''' , '''''' ) return _inner def UpperCamelCase__( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any]=None )->int: # If no key is provided, we use a noop. def noop(UpperCamelCase__ : Optional[Any] ): return x if key is None: A__ = noop # Constants are all uppercase, they go first. A__ = [obj for obj in objects if key(UpperCamelCase__ ).isupper()] # Classes are not all uppercase but start with a capital, they go second. A__ = [obj for obj in objects if key(UpperCamelCase__ )[0].isupper() and not key(UpperCamelCase__ ).isupper()] # Functions begin with a lowercase, they go last. A__ = [obj for obj in objects if not key(UpperCamelCase__ )[0].isupper()] A__ = ignore_underscore(UpperCamelCase__ ) return sorted(UpperCamelCase__ , key=UpperCamelCase__ ) + sorted(UpperCamelCase__ , key=UpperCamelCase__ ) + sorted(UpperCamelCase__ , key=UpperCamelCase__ ) def UpperCamelCase__( UpperCamelCase__ : List[Any] )->Optional[Any]: # This inner function sort imports between [ ]. def _replace(UpperCamelCase__ : int ): A__ = match.groups()[0] if "," not in imports: return f"[{imports}]" A__ = [part.strip().replace('''\"''' , '''''' ) for part in imports.split(''',''' )] # We will have a final empty element if the line finished with a comma. 
if len(keys[-1] ) == 0: A__ = keys[:-1] return "[" + ", ".join([f"\"{k}\"" for k in sort_objects(UpperCamelCase__ )] ) + "]" A__ = import_statement.split('''\n''' ) if len(UpperCamelCase__ ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. A__ = 2 if lines[1].strip() == "[" else 1 A__ = [(i, _re_strip_line.search(UpperCamelCase__ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] A__ = sort_objects(UpperCamelCase__ , key=lambda UpperCamelCase__ : x[1] ) A__ = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(UpperCamelCase__ ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: A__ = _re_bracket_content.sub(_replace , lines[1] ) else: A__ = [part.strip().replace('''\"''' , '''''' ) for part in lines[1].split(''',''' )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: A__ = keys[:-1] A__ = get_indent(lines[1] ) + ", ".join([f"\"{k}\"" for k in sort_objects(UpperCamelCase__ )] ) return "\n".join(UpperCamelCase__ ) else: # Finally we have to deal with imports fitting on one line A__ = _re_bracket_content.sub(_replace , UpperCamelCase__ ) return import_statement def UpperCamelCase__( UpperCamelCase__ : Any , UpperCamelCase__ : int=True )->int: with open(UpperCamelCase__ , encoding='''utf-8''' ) as f: A__ = f.read() if "_import_structure" not in code: return # Blocks of indent level 0 A__ = split_code_in_indented_blocks( UpperCamelCase__ , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' ) # We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(UpperCamelCase__ ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. A__ = main_blocks[block_idx] A__ = block.split('''\n''' ) # Get to the start of the imports. A__ = 0 while line_idx < len(UpperCamelCase__ ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: A__ = len(UpperCamelCase__ ) else: line_idx += 1 if line_idx >= len(UpperCamelCase__ ): continue # Ignore beginning and last line: they don't contain anything. A__ = "\n".join(block_lines[line_idx:-1] ) A__ = get_indent(block_lines[1] ) # Slit the internal block into blocks of indent level 1. A__ = split_code_in_indented_blocks(UpperCamelCase__ , indent_level=UpperCamelCase__ ) # We have two categories of import key: list or _import_structure[key].append/extend A__ = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. A__ = [(pattern.search(UpperCamelCase__ ).groups()[0] if pattern.search(UpperCamelCase__ ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. A__ = [(i, key) for i, key in enumerate(UpperCamelCase__ ) if key is not None] A__ = [x[0] for x in sorted(UpperCamelCase__ , key=lambda UpperCamelCase__ : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. 
A__ = 0 A__ = [] for i in range(len(UpperCamelCase__ ) ): if keys[i] is None: reorderded_blocks.append(internal_blocks[i] ) else: A__ = sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reorderded_blocks.append(UpperCamelCase__ ) count += 1 # And we put our main block back together with its first and last line. A__ = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] ) if code != "\n".join(UpperCamelCase__ ): if check_only: return True else: print(f"Overwriting {file}." ) with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as f: f.write('''\n'''.join(UpperCamelCase__ ) ) def UpperCamelCase__( UpperCamelCase__ : Optional[int]=True )->Any: A__ = [] for root, _, files in os.walk(UpperCamelCase__ ): if "__init__.py" in files: A__ = sort_imports(os.path.join(UpperCamelCase__ , '''__init__.py''' ) , check_only=UpperCamelCase__ ) if result: A__ = [os.path.join(UpperCamelCase__ , '''__init__.py''' )] if len(UpperCamelCase__ ) > 0: raise ValueError(f"Would overwrite {len(UpperCamelCase__ )} files, run `make style`." ) if __name__ == "__main__": a__: str = argparse.ArgumentParser() parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.') a__: List[Any] = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
193
# HF Trainer benchmarking tool # # This tool can be used to run and compare multiple dimensions of the HF Trainers args. # # It then prints a report once in github format with all the information that needs to be shared # with others and second time in a console-friendly format, so it's easier to use for tuning things up. # # The main idea is: # # ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \ # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \ # --target-metric-key train_samples_per_second # # The variations can be any command line argument that you want to compare and not just dtype as in # the example. # # --variations allows you to compare variations in multiple dimensions. # # as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6 # times adding one of: # # 1. --tf32 0 --fp16 0 # 2. --tf32 0 --fp16 1 # 3. --tf32 0 --bf16 1 # 4. --tf32 1 --fp16 0 # 5. --tf32 1 --fp16 1 # 6. --tf32 1 --bf16 1 # # and print the results. This is just a cartesian product - and more than 2 dimensions can be used. # # If you want to rely on defaults, this: # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' # is identical to this: # --variations '--tf32 0|--tf32 1' '|--fp16|--bf16' # # the leading empty variation in the 2nd dimension is a valid variation. # # So here we get the following 6 variations: # # 1. --tf32 0 # 2. --tf32 0 --fp16 # 3. --tf32 0 --bf16 # 4. --tf32 1 # 5. --tf32 1 --fp16 # 6. --tf32 1 --bf16 # # In this particular case we don't know what the default tf32 setting is as it's normally # pytorch-version dependent). That's why it's best to do an explicit setting of each variation: # `--tf32 0|--tf32 1` # # Here is a full example of a train: # # CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \ # --base-cmd \ # ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \ # --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \ # --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \ # --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \ # --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \ # --source_prefix "translate English to Romanian: " --warmup_steps 50 \ # --max_train_samples 20000 --dataloader_num_workers 2 ' \ # --target-metric-key train_samples_per_second --repeat-times 1 --variations \ # '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \ # --repeat-times 1 --base-variation '--tf32 0' # # and here is a possible output: # # # | Variation | Train | Diff | Train | # | | samples | % | loss | # | | per | | | # | | second | | | # |:----------------|----------:|-------:|--------:| # | --tf32 0 | 285.11 | 0 | 2.51 | # | --tf32 1 | 342.09 | 20 | 2.51 | # | --fp16 --tf32 0 | 423.49 | 49 | 2.51 | # | --fp16 --tf32 1 | 423.13 | 48 | 2.51 | # | --bf16 --tf32 0 | 416.80 | 46 | 2.52 | # | --bf16 --tf32 1 | 415.87 | 46 | 2.52 | # # # So you can quickly compare the different outcomes. # # Typically running each experiment once is enough, but if the environment is unstable you can # re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results. 
# # By default it'll use the lowest result as the base line to use as 100% and then compare the rest to # it as can be seen from the table above, but you can also specify which combination is the one to use as # the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0' # # --target-metric-key is there to tell the program which metrics to compare - the different metric keys are # inside output_dir/all_results.json. e.g., to measure eval performance instead of train use: # --target-metric-key eval_samples_per_second # but of course you will need to adjust the --base-cmd value in the example to perform evaluation as # well (as currently it doesn't) # import argparse import datetime import io import itertools import json import math import os import platform import re import shlex import subprocess import sys from pathlib import Path from statistics import fmean import pandas as pd import torch from tqdm import tqdm import transformers lowerCAmelCase__ = float('nan') class lowerCAmelCase__ : '''simple docstring''' def __init__( self , __lowerCamelCase) -> Optional[Any]: _A : List[Any] = sys.stdout _A : str = open(__lowerCamelCase , "a") def __getattr__( self , __lowerCamelCase) -> List[str]: return getattr(self.stdout , __lowerCamelCase) def _lowerCamelCase ( self , __lowerCamelCase) -> str: self.stdout.write(__lowerCamelCase) # strip tqdm codes self.file.write(re.sub(r"^.*\r" , "" , __lowerCamelCase , 0 , re.M)) def _UpperCAmelCase (UpperCamelCase__ : str=80 , UpperCamelCase__ : Tuple=False ): _A : Tuple = [] # deal with critical env vars _A : Dict = ["CUDA_VISIBLE_DEVICES"] for key in env_keys: _A : Optional[int] = os.environ.get(UpperCamelCase__ , UpperCamelCase__ ) if val is not None: cmd.append(f"{key}={val}" ) # python executable (not always needed if the script is executable) _A : Optional[int] = sys.executable if full_python_path else sys.executable.split("/" )[-1] cmd.append(UpperCamelCase__ ) # now the normal args cmd += list(map(shlex.quote , sys.argv ) ) # split up into up to MAX_WIDTH lines with shell multi-line escapes _A : Tuple = [] _A : Dict = "" while len(UpperCamelCase__ ) > 0: current_line += f"{cmd.pop(0 )} " if len(UpperCamelCase__ ) == 0 or len(UpperCamelCase__ ) + len(cmd[0] ) + 1 > max_width - 1: lines.append(UpperCamelCase__ ) _A : Union[str, Any] = "" return "\\\n".join(UpperCamelCase__ ) def _UpperCAmelCase (UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple ): # unwrap multi-line input _A : Union[str, Any] = re.sub(r"[\\\n]+" , " " , args.base_cmd ) # remove --output_dir if any and set our own _A : int = re.sub("--output_dir\s+[^\s]+" , "" , args.base_cmd ) args.base_cmd += f" --output_dir {output_dir}" # ensure we have --overwrite_output_dir _A : int = re.sub("--overwrite_output_dir\s+" , "" , args.base_cmd ) args.base_cmd += " --overwrite_output_dir" return [sys.executable] + shlex.split(args.base_cmd ) def _UpperCAmelCase (UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] ): # Enable to debug everything but the run itself, to do it fast and see the progress. 
# This is useful for debugging the output formatting quickly - we can remove it later once # everybody is happy with the output if 0: import random from time import sleep sleep(0 ) return dict( {k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 1_00.2, 55.66_66, 2_22.22_22_22_22] )} , ) _A : Dict = subprocess.run(UpperCamelCase__ , capture_output=UpperCamelCase__ , text=UpperCamelCase__ ) if verbose: print("STDOUT" , result.stdout ) print("STDERR" , result.stderr ) # save the streams _A : Tuple = variation.replace(" " , "-" ) with open(Path(UpperCamelCase__ ) / f"log.{prefix}.stdout.txt" , "w" ) as f: f.write(result.stdout ) with open(Path(UpperCamelCase__ ) / f"log.{prefix}.stderr.txt" , "w" ) as f: f.write(result.stderr ) if result.returncode != 0: if verbose: print("failed" ) return {target_metric_key: nan} with io.open(f"{output_dir}/all_results.json" , "r" , encoding="utf-8" ) as f: _A : List[str] = json.load(UpperCamelCase__ ) # filter out just the keys we want return {k: v for k, v in metrics.items() if k in metric_keys} def _UpperCAmelCase (UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Any , ): _A : Union[str, Any] = [] _A : Optional[int] = [] _A : Any = f"{id}: {variation:<{longest_variation_len}}" _A : Dict = f"{preamble}: " _A : Union[str, Any] = set(report_metric_keys + [target_metric_key] ) for i in tqdm(range(UpperCamelCase__ ) , desc=UpperCamelCase__ , leave=UpperCamelCase__ ): _A : Optional[Any] = process_run_single( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) _A : Optional[Any] = single_run_metrics[target_metric_key] if not math.isnan(UpperCamelCase__ ): metrics.append(UpperCamelCase__ ) results.append(UpperCamelCase__ ) outcome += "✓" else: outcome += "✘" _A : str = f"\33[2K\r{outcome}" if len(UpperCamelCase__ ) > 0: _A : List[str] = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()} _A : Any = round(mean_metrics[target_metric_key] , 2 ) _A : Tuple = f"{outcome} {mean_target}" if len(UpperCamelCase__ ) > 1: results_str += f" {tuple(round(UpperCamelCase__ , 2 ) for x in results )}" print(UpperCamelCase__ ) _A : Optional[int] = variation return mean_metrics else: print(UpperCamelCase__ ) return {variation_key: variation, target_metric_key: nan} def _UpperCAmelCase (): _A : int = torch.cuda.get_device_properties(torch.device("cuda" ) ) return f"\nDatetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n" def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict ): _A : Any = pd.DataFrame(UpperCamelCase__ ) _A : List[str] = "variation" _A : List[Any] = "diff_%" _A : int = nan if base_variation is not None and len(df[df[variation_key] == base_variation] ): # this may still return nan _A : int = df.loc[df[variation_key] == base_variation][target_metric_key].item() if math.isnan(UpperCamelCase__ ): # as a fallback, use the minimal value as the 
sentinel _A : List[str] = df.loc[df[target_metric_key] != nan][target_metric_key].min() # create diff column if possible if not math.isnan(UpperCamelCase__ ): _A : Optional[Any] = df.apply( lambda UpperCamelCase__ : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value ) if not math.isnan(r[target_metric_key] ) else 0 , axis="columns" , ) # re-order columns _A : Union[str, Any] = [variation_key, target_metric_key, diff_key, *report_metric_keys] _A : Any = df.reindex(UpperCamelCase__ , axis="columns" ) # reorder cols # capitalize _A : Tuple = df.rename(str.capitalize , axis="columns" ) # make the cols as narrow as possible _A : List[str] = df.rename(lambda UpperCamelCase__ : c.replace("_" , "<br>" ) , axis="columns" ) _A : Union[str, Any] = df.rename(lambda UpperCamelCase__ : c.replace("_" , "\n" ) , axis="columns" ) _A : Optional[int] = ["", "Copy between the cut-here-lines and paste as is to github or a forum"] report += ["----------8<-----------------8<--------"] report += ["*** Results:", df_github.to_markdown(index=UpperCamelCase__ , floatfmt=".2f" )] report += ["```"] report += ["*** Setup:", get_versions()] report += ["*** The benchmark command line was:", get_original_command()] report += ["```"] report += ["----------8<-----------------8<--------"] report += ["*** Results (console):", df_console.to_markdown(index=UpperCamelCase__ , floatfmt=".2f" )] print("\n\n".join(UpperCamelCase__ ) ) def _UpperCAmelCase (): _A : int = argparse.ArgumentParser() parser.add_argument( "--base-cmd" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="Base cmd" , ) parser.add_argument( "--variations" , default=UpperCamelCase__ , type=UpperCamelCase__ , nargs="+" , required=UpperCamelCase__ , help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'" , ) parser.add_argument( "--base-variation" , default=UpperCamelCase__ , type=UpperCamelCase__ , help="Baseline variation to compare to. if None the minimal target value will be used to compare against" , ) parser.add_argument( "--target-metric-key" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second" , ) parser.add_argument( "--report-metric-keys" , default="" , type=UpperCamelCase__ , help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. 
Use a single argument e.g., 'train_loss train_samples" , ) parser.add_argument( "--repeat-times" , default=1 , type=UpperCamelCase__ , help="How many times to re-run each variation - an average will be reported" , ) parser.add_argument( "--output_dir" , default="output_benchmark" , type=UpperCamelCase__ , help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked" , ) parser.add_argument( "--verbose" , default=UpperCamelCase__ , action="store_true" , help="Whether to show the outputs of each run or just the benchmark progress" , ) _A : int = parser.parse_args() _A : Union[str, Any] = args.output_dir Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ ) _A : Tuple = get_base_command(UpperCamelCase__ , UpperCamelCase__ ) # split each dimension into its --foo variations _A : Dict = [list(map(str.strip , re.split(r"\|" , UpperCamelCase__ ) ) ) for x in args.variations] # build a cartesian product of dimensions and convert those back into cmd-line arg strings, # while stripping white space for inputs that were empty _A : Union[str, Any] = list(map(str.strip , map(" ".join , itertools.product(*UpperCamelCase__ ) ) ) ) _A : Union[str, Any] = max(len(UpperCamelCase__ ) for x in variations ) # split wanted keys _A : str = args.report_metric_keys.split() # capture prints into a log file for convenience _A : Optional[int] = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S' )}.txt" print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt" ) print(f"and this script's output is also piped into {report_fn}" ) _A : Tuple = Tee(UpperCamelCase__ ) print(f"\n*** Running {len(UpperCamelCase__ )} benchmarks:" ) print(f"Base command: {' '.join(UpperCamelCase__ )}" ) _A : str = "variation" _A : Union[str, Any] = [] for id, variation in enumerate(tqdm(UpperCamelCase__ , desc="Total completion: " , leave=UpperCamelCase__ ) ): _A : Dict = base_cmd + variation.split() results.append( process_run( id + 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , args.target_metric_key , UpperCamelCase__ , args.repeat_times , UpperCamelCase__ , args.verbose , ) ) process_results(UpperCamelCase__ , args.target_metric_key , UpperCamelCase__ , args.base_variation , UpperCamelCase__ ) if __name__ == "__main__": main()
11
0
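The first sample in the row above sorts the entries of an `_import_structure` block into three buckets: all-uppercase constants, capitalized class names, then lowercase functions, each bucket ordered case-insensitively with underscores ignored. A standalone sketch of that ordering (the example names are made up):

```python
def _ignore_underscore(name: str) -> str:
    # Case-insensitive sort key that also ignores underscores.
    return name.lower().replace("_", "")


def sort_objects(objects: list[str]) -> list[str]:
    constants = [o for o in objects if o.isupper()]                       # e.g. CONFIG_MAPPING
    classes = [o for o in objects if o[0].isupper() and not o.isupper()]  # e.g. AutoModel
    functions = [o for o in objects if not o[0].isupper()]                # e.g. load_tf_weights
    return (
        sorted(constants, key=_ignore_underscore)
        + sorted(classes, key=_ignore_underscore)
        + sorted(functions, key=_ignore_underscore)
    )


print(sort_objects(["load_tf_weights", "AutoModel", "CONFIG_MAPPING", "AutoConfig"]))
# ['CONFIG_MAPPING', 'AutoConfig', 'AutoModel', 'load_tf_weights']
```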
'''simple docstring''' from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available from .timesteps import ( fastaa_timesteps, smartaa_timesteps, smartaa_timesteps, smartaaa_timesteps, smartaaa_timesteps, superaa_timesteps, superaa_timesteps, superaaa_timesteps, ) @dataclass class A_ ( lowerCAmelCase_ ): _lowerCamelCase : int = 42 _lowerCamelCase : List[str] = 42 _lowerCamelCase : List[Any] = 42 try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_if import IFPipeline from .pipeline_if_imgaimg import IFImgaImgPipeline from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline from .pipeline_if_inpainting import IFInpaintingPipeline from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline from .pipeline_if_superresolution import IFSuperResolutionPipeline from .safety_checker import IFSafetyChecker from .watermark import IFWatermarker
22
import logging import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional, Union import datasets import numpy as np import torch from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.trainer_utils import get_last_checkpoint from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.31.0') lowerCAmelCase__ = logging.getLogger(__name__) @dataclass class lowerCAmelCase__ : '''simple docstring''' __SCREAMING_SNAKE_CASE = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "Pretrained config name or path if not the same as model_name"}) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , ) __SCREAMING_SNAKE_CASE = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) } , ) @dataclass class lowerCAmelCase__ : '''simple docstring''' __SCREAMING_SNAKE_CASE = field(default=a , metadata={"help": "The input training data file (a text file)."}) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "Overwrite the cached training and evaluation sets"}) __SCREAMING_SNAKE_CASE = field( default=a , metadata={"help": "The number of processes to use for the preprocessing."} , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={ "help": ( "The maximum total input sequence length after tokenization. If passed, sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={ "help": ( "Whether to pad all samples to the maximum sentence length. " "If False, will pad the samples dynamically when batching to the maximum length in the batch. More " "efficient on GPU but very bad for TPU." ) } , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } , ) __SCREAMING_SNAKE_CASE = field( default=a , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) } , ) def _lowerCamelCase ( self) -> int: if self.train_file is not None: _A : Optional[int] = self.train_file.split(".")[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." 
if self.validation_file is not None: _A : Dict = self.validation_file.split(".")[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." @dataclass class lowerCAmelCase__ : '''simple docstring''' __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None def __call__( self , __lowerCamelCase) -> str: _A : List[Any] = "label" if "label" in features[0].keys() else "labels" _A : Any = [feature.pop(__lowerCamelCase) for feature in features] _A : Optional[int] = len(__lowerCamelCase) _A : int = len(features[0]["input_ids"]) _A : Tuple = [ [{k: v[i] for k, v in feature.items()} for i in range(__lowerCamelCase)] for feature in features ] _A : str = list(chain(*__lowerCamelCase)) _A : Tuple = self.tokenizer.pad( __lowerCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , ) # Un-flatten _A : Optional[int] = {k: v.view(__lowerCamelCase , __lowerCamelCase , -1) for k, v in batch.items()} # Add back labels _A : Optional[int] = torch.tensor(__lowerCamelCase , dtype=torch.intaa) return batch def _UpperCAmelCase (): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _A : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _A , _A , _A : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _A , _A , _A : Union[str, Any] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_swag" , UpperCamelCase__ , UpperCamelCase__ ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _A : int = training_args.get_process_log_level() logger.setLevel(UpperCamelCase__ ) datasets.utils.logging.set_verbosity(UpperCamelCase__ ) transformers.utils.logging.set_verbosity(UpperCamelCase__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" ) logger.info(f"Training/evaluation parameters {training_args}" ) # Detecting last checkpoint. _A : List[Any] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _A : Optional[int] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." 
) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.train_file is not None or data_args.validation_file is not None: _A : List[str] = {} if data_args.train_file is not None: _A : Optional[int] = data_args.train_file if data_args.validation_file is not None: _A : Tuple = data_args.validation_file _A : Union[str, Any] = data_args.train_file.split("." )[-1] _A : List[str] = load_dataset( UpperCamelCase__ , data_files=UpperCamelCase__ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) else: # Downloading and loading the swag dataset from the hub. _A : Union[str, Any] = load_dataset( "swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _A : Optional[Any] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) _A : Optional[Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) _A : List[Any] = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # When using your own dataset or a different dataset from swag, you will probably need to change this. _A : str = [f"ending{i}" for i in range(4 )] _A : Union[str, Any] = "sent1" _A : str = "sent2" if data_args.max_seq_length is None: _A : Any = tokenizer.model_max_length if max_seq_length > 1024: logger.warning( "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value" " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can" " override this default with `--block_size xxx`." 
) _A : Optional[Any] = 1024 else: if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the" f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." ) _A : int = min(data_args.max_seq_length , tokenizer.model_max_length ) # Preprocessing the datasets. def preprocess_function(UpperCamelCase__ : List[Any] ): _A : List[Any] = [[context] * 4 for context in examples[context_name]] _A : Any = examples[question_header_name] _A : Union[str, Any] = [ [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(UpperCamelCase__ ) ] # Flatten out _A : Dict = list(chain(*UpperCamelCase__ ) ) _A : List[Any] = list(chain(*UpperCamelCase__ ) ) # Tokenize _A : str = tokenizer( UpperCamelCase__ , UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding="max_length" if data_args.pad_to_max_length else False , ) # Un-flatten return {k: [v[i : i + 4] for i in range(0 , len(UpperCamelCase__ ) , 4 )] for k, v in tokenized_examples.items()} if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset" ) _A : Optional[int] = raw_datasets["train"] if data_args.max_train_samples is not None: _A : Union[str, Any] = min(len(UpperCamelCase__ ) , data_args.max_train_samples ) _A : Any = train_dataset.select(range(UpperCamelCase__ ) ) with training_args.main_process_first(desc="train dataset map pre-processing" ): _A : Optional[int] = train_dataset.map( UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset" ) _A : Optional[int] = raw_datasets["validation"] if data_args.max_eval_samples is not None: _A : str = min(len(UpperCamelCase__ ) , data_args.max_eval_samples ) _A : Dict = eval_dataset.select(range(UpperCamelCase__ ) ) with training_args.main_process_first(desc="validation dataset map pre-processing" ): _A : List[str] = eval_dataset.map( UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) # Data collator _A : str = ( default_data_collator if data_args.pad_to_max_length else DataCollatorForMultipleChoice(tokenizer=UpperCamelCase__ , pad_to_multiple_of=8 if training_args.fpaa else None ) ) # Metric def compute_metrics(UpperCamelCase__ : Tuple ): _A , _A : List[str] = eval_predictions _A : Optional[int] = np.argmax(UpperCamelCase__ , axis=1 ) return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()} # Initialize our Trainer _A : List[str] = Trainer( model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=UpperCamelCase__ , data_collator=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , ) # Training if training_args.do_train: _A : Any = None if training_args.resume_from_checkpoint is not None: _A : Optional[int] = training_args.resume_from_checkpoint elif last_checkpoint is not None: _A : int = last_checkpoint _A : Any = trainer.train(resume_from_checkpoint=UpperCamelCase__ ) trainer.save_model() # Saves the tokenizer too for easy upload _A : Optional[int] = train_result.metrics _A : Tuple = ( 
data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCamelCase__ ) ) _A : Tuple = min(UpperCamelCase__ , len(UpperCamelCase__ ) ) trainer.log_metrics("train" , UpperCamelCase__ ) trainer.save_metrics("train" , UpperCamelCase__ ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***" ) _A : List[Any] = trainer.evaluate() _A : int = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(UpperCamelCase__ ) _A : Optional[Any] = min(UpperCamelCase__ , len(UpperCamelCase__ ) ) trainer.log_metrics("eval" , UpperCamelCase__ ) trainer.save_metrics("eval" , UpperCamelCase__ ) _A : Tuple = { "finetuned_from": model_args.model_name_or_path, "tasks": "multiple-choice", "dataset_tags": "swag", "dataset_args": "regular", "dataset": "SWAG", "language": "en", } if training_args.push_to_hub: trainer.push_to_hub(**UpperCamelCase__ ) else: trainer.create_model_card(**UpperCamelCase__ ) def _UpperCAmelCase (UpperCamelCase__ : Optional[int] ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
11
0
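The SWAG fine-tuning script in the row above collates multiple-choice features by flattening the per-example answer choices into one long batch, padding, and reshaping back to `(batch_size, num_choices, seq_len)`. That flatten/pad/un-flatten step in isolation, on toy token ids rather than real tokenizer output:

```python
import torch

# Toy batch: 2 examples, 4 answer choices each, variable-length token id lists.
features = [
    [[5, 6, 7], [5, 6], [5], [5, 6, 7]],
    [[8], [8, 9], [8, 9, 1], [8, 9]],
]
batch_size, num_choices = len(features), len(features[0])

# Flatten to batch_size * num_choices sequences, then pad each to the longest one.
flat = [choice for example in features for choice in example]
max_len = max(len(seq) for seq in flat)
padded = torch.tensor([seq + [0] * (max_len - len(seq)) for seq in flat])

# Un-flatten back to (batch_size, num_choices, max_len), as the collator does with .view().
input_ids = padded.view(batch_size, num_choices, -1)
print(input_ids.shape)  # torch.Size([2, 4, 3])
```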
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __A =logging.get_logger(__name__) __A ={ 'weiweishi/roc-bert-base-zh': 'https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json', } class _snake_case ( a__ ): lowerCAmelCase :Optional[Any] = '''roc_bert''' def __init__( self , _lowerCamelCase=3_0522 , _lowerCamelCase=768 , _lowerCamelCase=12 , _lowerCamelCase=12 , _lowerCamelCase=3072 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=2 , _lowerCamelCase=0.02 , _lowerCamelCase=1e-1_2 , _lowerCamelCase=True , _lowerCamelCase=0 , _lowerCamelCase="absolute" , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=768 , _lowerCamelCase=910 , _lowerCamelCase=512 , _lowerCamelCase=2_4858 , _lowerCamelCase=True , **_lowerCamelCase , ): UpperCAmelCase__ : int = vocab_size UpperCAmelCase__ : List[str] = max_position_embeddings UpperCAmelCase__ : Union[str, Any] = hidden_size UpperCAmelCase__ : str = num_hidden_layers UpperCAmelCase__ : Dict = num_attention_heads UpperCAmelCase__ : List[Any] = intermediate_size UpperCAmelCase__ : List[Any] = hidden_act UpperCAmelCase__ : Dict = hidden_dropout_prob UpperCAmelCase__ : List[str] = attention_probs_dropout_prob UpperCAmelCase__ : str = initializer_range UpperCAmelCase__ : str = type_vocab_size UpperCAmelCase__ : Dict = layer_norm_eps UpperCAmelCase__ : Union[str, Any] = use_cache UpperCAmelCase__ : Optional[Any] = enable_pronunciation UpperCAmelCase__ : Tuple = enable_shape UpperCAmelCase__ : str = pronunciation_embed_dim UpperCAmelCase__ : Tuple = pronunciation_vocab_size UpperCAmelCase__ : Union[str, Any] = shape_embed_dim UpperCAmelCase__ : List[str] = shape_vocab_size UpperCAmelCase__ : List[str] = concat_input UpperCAmelCase__ : Tuple = position_embedding_type UpperCAmelCase__ : Union[str, Any] = classifier_dropout super().__init__(pad_token_id=__lowerCamelCase , **__lowerCamelCase)
163
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" , "False")) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , ) @pytest.mark.usefixtures("sm_env") @parameterized_class( [ { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6}, }, { "framework": "pytorch", "script": "run_ddp.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6}, }, { "framework": "tensorflow", "script": "run_tf_dist.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7}, }, ]) class lowerCAmelCase__ ( unittest.TestCase): '''simple docstring''' def _lowerCamelCase ( self) -> str: if self.framework == "pytorch": subprocess.run( F"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding="utf-8" , check=__lowerCamelCase , ) assert hasattr(self , "env") def _lowerCamelCase ( self , __lowerCamelCase) -> Tuple: _A : Dict = F"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}" # distributed data settings _A : Optional[Any] = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=__lowerCamelCase , instance_count=__lowerCamelCase , instance_type=self.instance_type , debugger_hook_config=__lowerCamelCase , hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=__lowerCamelCase , py_version="py36" , ) def _lowerCamelCase ( self , __lowerCamelCase) -> Optional[Any]: TrainingJobAnalytics(__lowerCamelCase).export_csv(F"{self.env.test_path}/{job_name}_metrics.csv") @parameterized.expand([(2,)]) def _lowerCamelCase ( self , __lowerCamelCase) -> Any: # create estimator _A : Union[str, Any] = self.create_estimator(__lowerCamelCase) # run training estimator.fit() # result dataframe _A : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe() # extract kpis _A : List[Any] = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"]) _A : Dict = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"]) # get train time from SageMaker job, this includes starting, preprocessing, stopping _A : Optional[Any] = ( Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds" , 9_9_9_9_9_9) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy) assert all(t <= self.results["eval_loss"] for t in eval_loss) # dump tests result into json file to share in PR with open(F"{estimator.latest_training_job.name}.json" , "w") as 
outfile: json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , outfile)
outfile: json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , outfile)
11
0
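The configuration class in the row above follows the usual `PretrainedConfig` recipe: declare a `model_type`, accept hyperparameters as keyword arguments with defaults, store them as attributes, and forward the token ids plus any remaining kwargs to `super().__init__`. A pared-down sketch of that recipe; the class name, `model_type` string, and fields are invented for illustration:

```python
from transformers import PretrainedConfig


class ToyBertConfig(PretrainedConfig):
    # Hypothetical model type string; real configs register a unique one.
    model_type = "toy_bert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12,
                 pad_token_id=0, **kwargs):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        super().__init__(pad_token_id=pad_token_id, **kwargs)


config = ToyBertConfig(hidden_size=256)
print(config.hidden_size)              # 256
print(config.to_dict()["model_type"])  # toy_bert
```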
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class _UpperCAmelCase ( unittest.TestCase ): def A ( self : List[Any] , A : Union[str, Any] ) -> Dict: for model_result in results.values(): for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ): lowercase_ : Any = model_result["result"][batch_size][sequence_length] self.assertIsNotNone(__lowerCamelCase ) def A ( self : Any ) -> Tuple: lowercase_ : List[str] = "sshleifer/tiny-gpt2" lowercase_ : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__lowerCamelCase , multi_process=__lowerCamelCase , ) lowercase_ : Any = TensorFlowBenchmark(__lowerCamelCase ) lowercase_ : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def A ( self : Dict ) -> Tuple: lowercase_ : Union[str, Any] = "sgugger/tiny-distilbert-classification" lowercase_ : List[str] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , only_pretrain_model=__lowerCamelCase , ) lowercase_ : List[Any] = TensorFlowBenchmark(__lowerCamelCase ) lowercase_ : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def A ( self : Dict ) -> Union[str, Any]: lowercase_ : Any = "sshleifer/tiny-gpt2" lowercase_ : List[str] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) lowercase_ : Union[str, Any] = TensorFlowBenchmark(__lowerCamelCase ) lowercase_ : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def A ( self : Tuple ) -> Optional[int]: lowercase_ : Optional[Any] = "sshleifer/tiny-gpt2" lowercase_ : str = AutoConfig.from_pretrained(__lowerCamelCase ) lowercase_ : List[str] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__lowerCamelCase , multi_process=__lowerCamelCase , ) lowercase_ : Union[str, Any] = TensorFlowBenchmark(__lowerCamelCase , [config] ) lowercase_ : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def A ( self : List[Any] ) -> Union[str, Any]: lowercase_ : Any = "sshleifer/tiny-gpt2" lowercase_ : List[Any] = AutoConfig.from_pretrained(__lowerCamelCase ) lowercase_ : Optional[int] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) lowercase_ : Dict = TensorFlowBenchmark(__lowerCamelCase , [config] ) lowercase_ : Any = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) 
self.check_results_dict_not_empty(results.memory_inference_result ) def A ( self : Optional[int] ) -> List[str]: lowercase_ : str = "sshleifer/tiny-gpt2" lowercase_ : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) lowercase_ : Union[str, Any] = TensorFlowBenchmark(__lowerCamelCase ) lowercase_ : Optional[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def A ( self : Tuple ) -> List[str]: lowercase_ : str = "sshleifer/tiny-gpt2" lowercase_ : Optional[int] = AutoConfig.from_pretrained(__lowerCamelCase ) lowercase_ : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) lowercase_ : Union[str, Any] = TensorFlowBenchmark(__lowerCamelCase , [config] ) lowercase_ : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def A ( self : str ) -> Union[str, Any]: lowercase_ : Optional[Any] = "patrickvonplaten/t5-tiny-random" lowercase_ : Optional[Any] = AutoConfig.from_pretrained(__lowerCamelCase ) lowercase_ : List[str] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) lowercase_ : List[str] = TensorFlowBenchmark(__lowerCamelCase , configs=[config] ) lowercase_ : Optional[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , '''Cannot do xla on CPU.''' ) def A ( self : Dict ) -> str: lowercase_ : Dict = "sshleifer/tiny-gpt2" lowercase_ : Dict = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , use_xla=__lowerCamelCase , multi_process=__lowerCamelCase , ) lowercase_ : List[Any] = TensorFlowBenchmark(__lowerCamelCase ) lowercase_ : str = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def A ( self : str ) -> Optional[int]: lowercase_ : Dict = "sshleifer/tiny-gpt2" with tempfile.TemporaryDirectory() as tmp_dir: lowercase_ : List[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=__lowerCamelCase , save_to_csv=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__lowerCamelCase , '''inf_time.csv''' ) , inference_memory_csv_file=os.path.join(__lowerCamelCase , '''inf_mem.csv''' ) , env_info_csv_file=os.path.join(__lowerCamelCase , '''env.csv''' ) , multi_process=__lowerCamelCase , ) lowercase_ : Union[str, Any] = TensorFlowBenchmark(__lowerCamelCase ) benchmark.run() self.assertTrue(Path(os.path.join(__lowerCamelCase , '''inf_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(__lowerCamelCase , '''inf_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(__lowerCamelCase , '''env.csv''' ) ).exists() ) def A ( self : Any ) -> Tuple: lowercase_ : Optional[Any] = "sshleifer/tiny-gpt2" def _check_summary_is_not_empty(A : Dict ): 
self.assertTrue(hasattr(__lowerCamelCase , '''sequential''' ) ) self.assertTrue(hasattr(__lowerCamelCase , '''cumulative''' ) ) self.assertTrue(hasattr(__lowerCamelCase , '''current''' ) ) self.assertTrue(hasattr(__lowerCamelCase , '''total''' ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowercase_ : Union[str, Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__lowerCamelCase , '''log.txt''' ) , log_print=__lowerCamelCase , trace_memory_line_by_line=__lowerCamelCase , eager_mode=__lowerCamelCase , multi_process=__lowerCamelCase , ) lowercase_ : Optional[int] = TensorFlowBenchmark(__lowerCamelCase ) lowercase_ : Union[str, Any] = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(__lowerCamelCase , '''log.txt''' ) ).exists() )
33
import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = ["image_processor", "tokenizer"] __SCREAMING_SNAKE_CASE = "OwlViTImageProcessor" __SCREAMING_SNAKE_CASE = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , **__lowerCamelCase) -> Union[str, Any]: _A : int = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , __lowerCamelCase , ) _A : List[Any] = kwargs.pop("feature_extractor") _A : Dict = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`.") if tokenizer is None: raise ValueError("You need to specify a `tokenizer`.") super().__init__(__lowerCamelCase , __lowerCamelCase) def __call__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="max_length" , __lowerCamelCase="np" , **__lowerCamelCase) -> Any: if text is None and query_images is None and images is None: raise ValueError( "You have to specify at least one text or query image or image. All three cannot be none.") if text is not None: if isinstance(__lowerCamelCase , __lowerCamelCase) or (isinstance(__lowerCamelCase , __lowerCamelCase) and not isinstance(text[0] , __lowerCamelCase)): _A : Union[str, Any] = [self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)] elif isinstance(__lowerCamelCase , __lowerCamelCase) and isinstance(text[0] , __lowerCamelCase): _A : Optional[Any] = [] # Maximum number of queries across batch _A : str = max([len(__lowerCamelCase) for t in text]) # Pad all batch samples to max number of text queries for t in text: if len(__lowerCamelCase) != max_num_queries: _A : Optional[int] = t + [" "] * (max_num_queries - len(__lowerCamelCase)) _A : List[Any] = self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase) encodings.append(__lowerCamelCase) else: raise TypeError("Input text should be a string, a list of strings or a nested list of strings") if return_tensors == "np": _A : Tuple = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0) _A : Optional[Any] = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp _A : Optional[int] = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0) _A : Optional[int] = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0) elif return_tensors == "pt" and is_torch_available(): import torch _A : Optional[Any] = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0) _A : Union[str, Any] = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf _A : Any = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0) _A : Tuple = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0) else: raise ValueError("Target return tensor type could not be returned") _A : Optional[Any] = BatchEncoding() _A 
: Tuple = input_ids _A : Dict = attention_mask if query_images is not None: _A : Optional[Any] = BatchEncoding() _A : List[str] = self.image_processor( __lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase).pixel_values _A : Union[str, Any] = query_pixel_values if images is not None: _A : int = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase) if text is not None and images is not None: _A : Tuple = image_features.pixel_values return encoding elif query_images is not None and images is not None: _A : int = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**__lowerCamelCase) , tensor_type=__lowerCamelCase) def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> str: return self.image_processor.post_process(*__lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> List[str]: return self.image_processor.post_process_object_detection(*__lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> Optional[int]: return self.image_processor.post_process_image_guided_detection(*__lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> int: return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> Optional[int]: return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase) @property def _lowerCamelCase ( self) -> int: warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __lowerCamelCase , ) return self.image_processor_class @property def _lowerCamelCase ( self) -> List[str]: warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __lowerCamelCase , ) return self.image_processor
11
0
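The OwlViT processor in the row above accepts a different number of text queries per image and pads every example's query list to the longest one in the batch (using a single-space placeholder) before tokenizing, so the resulting tensors stay rectangular. That padding step on its own, with toy strings and no tokenizer:

```python
# Each image gets its own list of text queries; lengths differ across the batch.
texts = [
    ["a photo of a cat"],
    ["a photo of a dog", "a photo of a person"],
]

# Pad every query list to the batch-wide maximum so tokenization yields a rectangular batch.
max_num_queries = max(len(t) for t in texts)
padded = [t + [" "] * (max_num_queries - len(t)) for t in texts]

print(padded)
# [['a photo of a cat', ' '], ['a photo of a dog', 'a photo of a person']]
```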
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A : Union[str, Any] = logging.get_logger(__name__) A : Dict = { "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json", "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json", # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl } class _UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' __UpperCAmelCase : Optional[int] ="""xlm-roberta-xl""" def __init__( self , __a=25_08_80 , __a=25_60 , __a=36 , __a=32 , __a=1_02_40 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_14 , __a=1 , __a=0.0_2 , __a=1e-0_5 , __a=1 , __a=0 , __a=2 , __a="absolute" , __a=True , __a=None , **__a , ): super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase ) __lowerCAmelCase = vocab_size __lowerCAmelCase = hidden_size __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = hidden_act __lowerCAmelCase = intermediate_size __lowerCAmelCase = hidden_dropout_prob __lowerCAmelCase = attention_probs_dropout_prob __lowerCAmelCase = max_position_embeddings __lowerCAmelCase = type_vocab_size __lowerCAmelCase = initializer_range __lowerCAmelCase = layer_norm_eps __lowerCAmelCase = position_embedding_type __lowerCAmelCase = use_cache __lowerCAmelCase = classifier_dropout class _UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' @property def snake_case ( self ): if self.task == "multiple-choice": __lowerCAmelCase = {0: "batch", 1: "choice", 2: "sequence"} else: __lowerCAmelCase = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
57
import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class lowerCAmelCase__ ( unittest.TestCase): '''simple docstring''' def _lowerCamelCase ( self , __lowerCamelCase) -> Dict: for model_result in results.values(): for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"]): _A : Optional[int] = model_result["result"][batch_size][sequence_length] self.assertIsNotNone(__lowerCamelCase) def _lowerCamelCase ( self) -> int: _A : Optional[int] = "sshleifer/tiny-gpt2" _A : int = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : List[str] = PyTorchBenchmark(__lowerCamelCase) _A : Optional[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCamelCase ( self) -> Dict: _A : int = "sgugger/tiny-distilbert-classification" _A : str = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , only_pretrain_model=__lowerCamelCase , ) _A : Dict = PyTorchBenchmark(__lowerCamelCase) _A : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCamelCase ( self) -> Optional[Any]: _A : Tuple = "sshleifer/tiny-gpt2" _A : Optional[Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , torchscript=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : Union[str, Any] = PyTorchBenchmark(__lowerCamelCase) _A : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) @unittest.skipIf(torch_device == "cpu" , "Cant do half precision") def _lowerCamelCase ( self) -> int: _A : Any = "sshleifer/tiny-gpt2" _A : Tuple = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , fpaa=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : Any = PyTorchBenchmark(__lowerCamelCase) _A : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCamelCase ( self) -> Any: _A : Union[str, Any] = "sshleifer/tiny-gpt2" _A : Any = AutoConfig.from_pretrained(__lowerCamelCase) # set architectures equal to `None` _A : Dict = None _A : Any = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : Union[str, Any] = PyTorchBenchmark(__lowerCamelCase , configs=[config]) _A : int = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCamelCase ( self) -> int: _A : List[Any] = "sshleifer/tiny-gpt2" _A : int = PyTorchBenchmarkArguments( models=[MODEL_ID] , 
training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : Optional[Any] = PyTorchBenchmark(__lowerCamelCase) _A : int = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) @unittest.skipIf(torch_device == "cpu" , "Can't do half precision") def _lowerCamelCase ( self) -> Optional[Any]: _A : Any = "sshleifer/tiny-gpt2" _A : Tuple = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=__lowerCamelCase , multi_process=__lowerCamelCase , ) _A : List[Any] = PyTorchBenchmark(__lowerCamelCase) _A : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def _lowerCamelCase ( self) -> str: _A : List[str] = "sshleifer/tiny-gpt2" _A : Union[str, Any] = AutoConfig.from_pretrained(__lowerCamelCase) _A : Any = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : Optional[Any] = PyTorchBenchmark(__lowerCamelCase , configs=[config]) _A : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCamelCase ( self) -> int: _A : Tuple = "sshleifer/tinier_bart" _A : Optional[Any] = AutoConfig.from_pretrained(__lowerCamelCase) _A : Optional[int] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : Dict = PyTorchBenchmark(__lowerCamelCase , configs=[config]) _A : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCamelCase ( self) -> str: _A : List[Any] = "sshleifer/tiny-gpt2" _A : Optional[Any] = AutoConfig.from_pretrained(__lowerCamelCase) _A : Optional[Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : List[str] = PyTorchBenchmark(__lowerCamelCase , configs=[config]) _A : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def _lowerCamelCase ( self) -> int: _A : int = "sshleifer/tinier_bart" _A : str = AutoConfig.from_pretrained(__lowerCamelCase) _A : Tuple = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , ) _A : Tuple = PyTorchBenchmark(__lowerCamelCase , configs=[config]) _A : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def _lowerCamelCase ( self) -> Dict: _A : List[str] = "sshleifer/tiny-gpt2" with tempfile.TemporaryDirectory() as tmp_dir: _A : Optional[Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , save_to_csv=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__lowerCamelCase , 
"inf_time.csv") , train_memory_csv_file=os.path.join(__lowerCamelCase , "train_mem.csv") , inference_memory_csv_file=os.path.join(__lowerCamelCase , "inf_mem.csv") , train_time_csv_file=os.path.join(__lowerCamelCase , "train_time.csv") , env_info_csv_file=os.path.join(__lowerCamelCase , "env.csv") , multi_process=__lowerCamelCase , ) _A : Tuple = PyTorchBenchmark(__lowerCamelCase) benchmark.run() self.assertTrue(Path(os.path.join(__lowerCamelCase , "inf_time.csv")).exists()) self.assertTrue(Path(os.path.join(__lowerCamelCase , "train_time.csv")).exists()) self.assertTrue(Path(os.path.join(__lowerCamelCase , "inf_mem.csv")).exists()) self.assertTrue(Path(os.path.join(__lowerCamelCase , "train_mem.csv")).exists()) self.assertTrue(Path(os.path.join(__lowerCamelCase , "env.csv")).exists()) def _lowerCamelCase ( self) -> int: _A : Dict = "sshleifer/tiny-gpt2" def _check_summary_is_not_empty(__lowerCamelCase): self.assertTrue(hasattr(__lowerCamelCase , "sequential")) self.assertTrue(hasattr(__lowerCamelCase , "cumulative")) self.assertTrue(hasattr(__lowerCamelCase , "current")) self.assertTrue(hasattr(__lowerCamelCase , "total")) with tempfile.TemporaryDirectory() as tmp_dir: _A : Union[str, Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__lowerCamelCase , "log.txt") , log_print=__lowerCamelCase , trace_memory_line_by_line=__lowerCamelCase , multi_process=__lowerCamelCase , ) _A : Optional[int] = PyTorchBenchmark(__lowerCamelCase) _A : Dict = benchmark.run() _check_summary_is_not_empty(result.inference_summary) _check_summary_is_not_empty(result.train_summary) self.assertTrue(Path(os.path.join(__lowerCamelCase , "log.txt")).exists())
11
0
"""simple docstring""" import numpy as np import torch from torch.utils.data import DataLoader from accelerate.utils.dataclasses import DistributedType class __a : '''simple docstring''' def __init__( self , _a=2 , _a=3 , _a=64 , _a=None ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = np.random.default_rng(__lowerCamelCase ) SCREAMING_SNAKE_CASE__ : Dict = length SCREAMING_SNAKE_CASE__ : Any = rng.normal(size=(length,) ).astype(np.floataa ) SCREAMING_SNAKE_CASE__ : Any = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa ) def __len__( self ) -> Union[str, Any]: """simple docstring""" return self.length def __getitem__( self , _a ) -> Any: """simple docstring""" return {"x": self.x[i], "y": self.y[i]} class __a (torch.nn.Module): '''simple docstring''' def __init__( self , _a=0 , _a=0 , _a=False ) -> Any: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ : Any = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) SCREAMING_SNAKE_CASE__ : str = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) SCREAMING_SNAKE_CASE__ : Any = True def _a ( self , _a=None ) -> Optional[int]: """simple docstring""" if self.first_batch: print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' ) SCREAMING_SNAKE_CASE__ : Any = False return x * self.a[0] + self.b[0] class __a (torch.nn.Module): '''simple docstring''' def __init__( self , _a=0 , _a=0 , _a=False ) -> Union[str, Any]: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ : Any = torch.nn.Parameter(torch.tensor(__lowerCamelCase ).float() ) SCREAMING_SNAKE_CASE__ : Optional[int] = torch.nn.Parameter(torch.tensor(__lowerCamelCase ).float() ) SCREAMING_SNAKE_CASE__ : str = True def _a ( self , _a=None ) -> int: """simple docstring""" if self.first_batch: print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = False return x * self.a + self.b def _lowercase ( __lowerCAmelCase , __lowerCAmelCase = 16 ) -> Optional[Any]: from datasets import load_dataset from transformers import AutoTokenizer SCREAMING_SNAKE_CASE__ : Any = AutoTokenizer.from_pretrained("""bert-base-cased""" ) SCREAMING_SNAKE_CASE__ : int = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"} SCREAMING_SNAKE_CASE__ : Any = load_dataset("""csv""" , data_files=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : int = datasets["train"].unique("""label""" ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = {v: i for i, v in enumerate(UpperCamelCase__ )} def tokenize_function(__lowerCAmelCase ): # max_length=None => use the model max length (it's actually the default) SCREAMING_SNAKE_CASE__ : str = tokenizer( examples["""sentence1"""] , examples["""sentence2"""] , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding="""max_length""" ) if "label" in examples: SCREAMING_SNAKE_CASE__ : Optional[Any] = [label_to_id[l] for l in examples["label"]] return outputs # Apply the method we just defined to all the examples in all the splits of the dataset SCREAMING_SNAKE_CASE__ : int = datasets.map( UpperCamelCase__ , batched=UpperCamelCase__ , remove_columns=["""sentence1""", """sentence2""", """label"""] , ) def collate_fn(__lowerCAmelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. 
if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(UpperCamelCase__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" ) return tokenizer.pad(UpperCamelCase__ , padding="""longest""" , return_tensors="""pt""" ) # Instantiate dataloaders. SCREAMING_SNAKE_CASE__ : int = DataLoader(tokenized_datasets["""train"""] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=2 ) SCREAMING_SNAKE_CASE__ : Any = DataLoader(tokenized_datasets["""validation"""] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=1 ) return train_dataloader, eval_dataloader
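# Usage sketch: consuming the synthetic regression dataset defined at the top of this file.
# `RegressionDataset` is a hypothetical name for that class (its original name is mangled in
# this dump); the (a, b, length, seed) constructor and the {"x": ..., "y": ...} items match it.
from torch.utils.data import DataLoader

train_set = RegressionDataset(length=96)  # hypothetical name for the dataset class above
train_loader = DataLoader(train_set, batch_size=16, shuffle=True)
first_batch = next(iter(train_loader))
print(first_batch["x"].shape, first_batch["y"].shape)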
132
import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: lowerCAmelCase__ = None lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} lowerCAmelCase__ = { 'vocab_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model' ), }, 'tokenizer_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json' ), }, } lowerCAmelCase__ = { 'facebook/nllb-large-en-ro': 10_24, 'facebook/nllb-200-distilled-600M': 10_24, } # fmt: off lowerCAmelCase__ = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn'] class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE = 
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"] __SCREAMING_SNAKE_CASE = NllbTokenizer __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="<s>" , __lowerCamelCase="</s>" , __lowerCamelCase="</s>" , __lowerCamelCase="<s>" , __lowerCamelCase="<unk>" , __lowerCamelCase="<pad>" , __lowerCamelCase="<mask>" , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=False , **__lowerCamelCase , ) -> Tuple: # Mask token behave like a normal word, i.e. include the space before it _A : Any = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase) else mask_token _A : Optional[int] = legacy_behaviour super().__init__( vocab_file=__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , legacy_behaviour=__lowerCamelCase , **__lowerCamelCase , ) _A : int = vocab_file _A : Optional[Any] = False if not self.vocab_file else True _A : Tuple = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. _additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens]) self.add_special_tokens({"additional_special_tokens": _additional_special_tokens}) _A : Union[str, Any] = { lang_code: self.convert_tokens_to_ids(__lowerCamelCase) for lang_code in FAIRSEQ_LANGUAGE_CODES } _A : Optional[int] = src_lang if src_lang is not None else "eng_Latn" _A : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang) _A : List[str] = tgt_lang self.set_src_lang_special_tokens(self._src_lang) @property def _lowerCamelCase ( self) -> str: return self._src_lang @src_lang.setter def _lowerCamelCase ( self , __lowerCamelCase) -> None: _A : Tuple = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]: _A : Tuple = [self.sep_token_id] _A : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) -> Optional[int]: if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model") _A : List[Any] = src_lang _A : Optional[int] = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase) _A : Tuple = self.convert_tokens_to_ids(__lowerCamelCase) _A : Tuple = tgt_lang_id return inputs def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = "eng_Latn" , 
__lowerCamelCase = None , __lowerCamelCase = "fra_Latn" , **__lowerCamelCase , ) -> BatchEncoding: _A : Tuple = src_lang _A : int = tgt_lang return super().prepare_seqaseq_batch(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self) -> str: return self.set_src_lang_special_tokens(self.src_lang) def _lowerCamelCase ( self) -> List[str]: return self.set_tgt_lang_special_tokens(self.tgt_lang) def _lowerCamelCase ( self , __lowerCamelCase) -> None: _A : Dict = self.convert_tokens_to_ids(__lowerCamelCase) if self.legacy_behaviour: _A : List[str] = [] _A : Dict = [self.eos_token_id, self.cur_lang_code] else: _A : Tuple = [self.cur_lang_code] _A : Optional[Any] = [self.eos_token_id] _A : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens) _A : int = self.convert_ids_to_tokens(self.suffix_tokens) _A : List[Any] = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , ) def _lowerCamelCase ( self , __lowerCamelCase) -> None: _A : Optional[Any] = self.convert_tokens_to_ids(__lowerCamelCase) if self.legacy_behaviour: _A : Tuple = [] _A : Any = [self.eos_token_id, self.cur_lang_code] else: _A : Union[str, Any] = [self.cur_lang_code] _A : str = [self.eos_token_id] _A : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens) _A : Dict = self.convert_ids_to_tokens(self.suffix_tokens) _A : Union[str, Any] = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , ) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer.") if not os.path.isdir(__lowerCamelCase): logger.error(F"Vocabulary path ({save_directory}) should be a directory.") return _A : Dict = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(__lowerCamelCase): copyfile(self.vocab_file , __lowerCamelCase) return (out_vocab_file,)
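# Usage sketch: translation preprocessing with the fast NLLB tokenizer defined above. The
# checkpoint id is taken from the pretrained maps, and the language codes come from
# FAIRSEQ_LANGUAGE_CODES; loading the tokenizer requires network access.
from transformers import AutoTokenizer

nllb_tokenizer = AutoTokenizer.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
)
nllb_inputs = nllb_tokenizer("Hello, how are you?", return_tensors="pt")
print(nllb_inputs["input_ids"])  # source-language code and eos are added by the special-token template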
11
0
import argparse import os import jax as jnp import numpy as onp import torch import torch.nn as nn from music_spectrogram_diffusion import inference from tax import checkpoints from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder __lowerCAmelCase : Union[str, Any] = "base_with_context" def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase ) -> Tuple: __lowercase : Tuple = nn.Parameter(torch.FloatTensor(weights['''token_embedder''']['''embedding'''] ) ) __lowercase : int = nn.Parameter( torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=UpperCamelCase__ ) for lyr_num, lyr in enumerate(model.encoders ): __lowercase : Tuple = weights[F'layers_{lyr_num}'] __lowercase : Any = nn.Parameter( torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) ) __lowercase : Optional[int] = ly_weight["attention"] __lowercase : str = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) ) __lowercase : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) ) __lowercase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) ) __lowercase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) ) __lowercase : List[str] = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) ) __lowercase : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) ) __lowercase : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) ) __lowercase : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) ) __lowercase : str = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) ) return model def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]: __lowercase : Any = nn.Parameter(torch.FloatTensor(weights['''input_proj''']['''kernel'''].T ) ) __lowercase : List[Any] = nn.Parameter( torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=UpperCamelCase__ ) for lyr_num, lyr in enumerate(model.encoders ): __lowercase : List[str] = weights[F'layers_{lyr_num}'] __lowercase : Any = ly_weight["attention"] __lowercase : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) ) __lowercase : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) ) __lowercase : Tuple = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) ) __lowercase : Dict = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) ) __lowercase : Optional[Any] = nn.Parameter( torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) ) __lowercase : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) ) __lowercase : Tuple = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) ) __lowercase : str = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) ) __lowercase : Dict = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) ) __lowercase : Optional[int] = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) ) return model def 
UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase ) -> Any: __lowercase : Optional[int] = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense0''']['''kernel'''].T ) ) __lowercase : Any = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense1''']['''kernel'''].T ) ) __lowercase : List[str] = nn.Parameter( torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=UpperCamelCase__ ) __lowercase : Optional[Any] = nn.Parameter( torch.FloatTensor(weights['''continuous_inputs_projection''']['''kernel'''].T ) ) for lyr_num, lyr in enumerate(model.decoders ): __lowercase : str = weights[F'layers_{lyr_num}'] __lowercase : List[Any] = nn.Parameter( torch.FloatTensor(ly_weight['''pre_self_attention_layer_norm''']['''scale'''] ) ) __lowercase : Dict = nn.Parameter( torch.FloatTensor(ly_weight['''FiLMLayer_0''']['''DenseGeneral_0''']['''kernel'''].T ) ) __lowercase : List[Any] = ly_weight["self_attention"] __lowercase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) ) __lowercase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) ) __lowercase : str = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) ) __lowercase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) ) __lowercase : Union[str, Any] = ly_weight["MultiHeadDotProductAttention_0"] __lowercase : Any = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) ) __lowercase : Any = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) ) __lowercase : Any = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) ) __lowercase : Any = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) ) __lowercase : List[Any] = nn.Parameter( torch.FloatTensor(ly_weight['''pre_cross_attention_layer_norm''']['''scale'''] ) ) __lowercase : int = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) ) __lowercase : Any = nn.Parameter( torch.FloatTensor(ly_weight['''FiLMLayer_1''']['''DenseGeneral_0''']['''kernel'''].T ) ) __lowercase : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) ) __lowercase : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) ) __lowercase : int = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) ) __lowercase : Tuple = nn.Parameter(torch.FloatTensor(weights['''decoder_norm''']['''scale'''] ) ) __lowercase : Tuple = nn.Parameter(torch.FloatTensor(weights['''spec_out_dense''']['''kernel'''].T ) ) return model def UpperCAmelCase_ ( __lowerCAmelCase ) -> Optional[Any]: __lowercase : Dict = checkpoints.load_tax_checkpoint(args.checkpoint_path ) __lowercase : Dict = jnp.tree_util.tree_map(onp.array , UpperCamelCase__ ) __lowercase : List[str] = [ "from __gin__ import dynamic_registration", "from music_spectrogram_diffusion.models.diffusion import diffusion_utils", "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0", "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()", ] __lowercase : str = os.path.join(args.checkpoint_path , '''..''' , '''config.gin''' ) __lowercase : Tuple = inference.parse_training_gin_file(UpperCamelCase__ , UpperCamelCase__ ) __lowercase : int = inference.InferenceModel(args.checkpoint_path , UpperCamelCase__ ) __lowercase : 
List[str] = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' , variance_type='''fixed_large''' ) __lowercase : List[str] = SpectrogramNotesEncoder( max_length=synth_model.sequence_length['''inputs'''] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , ) __lowercase : Any = SpectrogramContEncoder( input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['''targets_context'''] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , ) __lowercase : Dict = TaFilmDecoder( input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['''targets_context'''] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , ) __lowercase : List[Any] = load_notes_encoder(ta_checkpoint['''target''']['''token_encoder'''] , UpperCamelCase__ ) __lowercase : List[Any] = load_continuous_encoder(ta_checkpoint['''target''']['''continuous_encoder'''] , UpperCamelCase__ ) __lowercase : int = load_decoder(ta_checkpoint['''target''']['''decoder'''] , UpperCamelCase__ ) __lowercase : Tuple = OnnxRuntimeModel.from_pretrained('''kashif/soundstream_mel_decoder''' ) __lowercase : List[Any] = SpectrogramDiffusionPipeline( notes_encoder=UpperCamelCase__ , continuous_encoder=UpperCamelCase__ , decoder=UpperCamelCase__ , scheduler=UpperCamelCase__ , melgan=UpperCamelCase__ , ) if args.save: pipe.save_pretrained(args.output_path ) if __name__ == "__main__": __lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.") parser.add_argument( "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not." ) parser.add_argument( "--checkpoint_path", default=F'{MODEL}/checkpoint_500000', type=str, required=False, help="Path to the original jax model checkpoint.", ) __lowerCAmelCase : Any = parser.parse_args() main(args)
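# Usage sketch: how the conversion script above is meant to be run and reloaded. The script
# filename and output directory are placeholders; the flag names and the default checkpoint
# layout come from the argparse definition above.
#
#   python convert_spectrogram_diffusion.py \
#       --checkpoint_path base_with_context/checkpoint_500000 \
#       --output_path ./spectrogram_diffusion_converted
#
# Once saved, the converted pipeline can be reloaded with diffusers' standard API:
from diffusers import SpectrogramDiffusionPipeline

converted_pipeline = SpectrogramDiffusionPipeline.from_pretrained("./spectrogram_diffusion_converted")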
156
# flake8: noqa # Lint as: python3 from typing import Dict, List, Optional, Type from .. import config from ..utils import logging from .formatting import ( ArrowFormatter, CustomFormatter, Formatter, PandasFormatter, PythonFormatter, TensorFormatter, format_table, query_table, ) from .np_formatter import NumpyFormatter lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = {} lowerCAmelCase__ = {} lowerCAmelCase__ = {} def _UpperCAmelCase (UpperCamelCase__ : type , UpperCamelCase__ : Optional[str] , UpperCamelCase__ : Optional[List[str]] = None , ): _A : Union[str, Any] = aliases if aliases is not None else [] if format_type in _FORMAT_TYPES: logger.warning( f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})" ) _A : Dict = formatter_cls for alias in set(aliases + [format_type] ): if alias in _FORMAT_TYPES_ALIASES: logger.warning( f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})" ) _A : Dict = format_type def _UpperCAmelCase (UpperCamelCase__ : Exception , UpperCamelCase__ : Optional[str] , UpperCamelCase__ : Optional[List[str]] = None ): _A : Union[str, Any] = aliases if aliases is not None else [] for alias in set(aliases + [format_type] ): _A : Union[str, Any] = unavailable_error # Here we define all the available formatting functions that can be used by `Dataset.set_format` _register_formatter(PythonFormatter, None, aliases=['python']) _register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow']) _register_formatter(NumpyFormatter, 'numpy', aliases=['np']) _register_formatter(PandasFormatter, 'pandas', aliases=['pd']) _register_formatter(CustomFormatter, 'custom') if config.TORCH_AVAILABLE: from .torch_formatter import TorchFormatter _register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch']) else: lowerCAmelCase__ = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.') _register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch']) if config.TF_AVAILABLE: from .tf_formatter import TFFormatter _register_formatter(TFFormatter, 'tensorflow', aliases=['tf']) else: lowerCAmelCase__ = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.') _register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf']) if config.JAX_AVAILABLE: from .jax_formatter import JaxFormatter _register_formatter(JaxFormatter, 'jax', aliases=[]) else: lowerCAmelCase__ = ValueError('JAX needs to be installed to be able to return JAX arrays.') _register_unavailable_formatter(_jax_error, 'jax', aliases=[]) def _UpperCAmelCase (UpperCamelCase__ : Optional[str] ): if format_type in _FORMAT_TYPES_ALIASES: return _FORMAT_TYPES_ALIASES[format_type] else: return format_type def _UpperCAmelCase (UpperCamelCase__ : Optional[str] , **UpperCamelCase__ : List[Any] ): _A : List[str] = get_format_type_from_alias(UpperCamelCase__ ) if format_type in _FORMAT_TYPES: return _FORMAT_TYPES[format_type](**UpperCamelCase__ ) if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE: raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type] else: raise ValueError( f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}'" )
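# Usage sketch: the user-facing side of this registry. The formatters registered above back
# `Dataset.set_format`; the tiny in-memory dataset keeps the snippet self-contained.
from datasets import Dataset

tiny = Dataset.from_dict({"a": [1, 2, 3], "b": [0.5, 1.5, 2.5]})
tiny.set_format("numpy")
print(type(tiny[0]["a"]))  # numpy scalar once the NumpyFormatter is active
tiny.set_format("pandas")
print(type(tiny[:2]))  # pandas.DataFrame once the PandasFormatter is active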
11
0
"""simple docstring""" import json import os import unittest from transformers.models.roc_bert.tokenization_roc_bert import ( VOCAB_FILES_NAMES, RoCBertBasicTokenizer, RoCBertTokenizer, RoCBertWordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ): """simple docstring""" _UpperCAmelCase :Tuple = RoCBertTokenizer _UpperCAmelCase :Tuple = None _UpperCAmelCase :Optional[int] = False _UpperCAmelCase :str = True _UpperCAmelCase :Union[str, Any] = filter_non_english def _snake_case ( self ): super().setUp() lowercase__: Optional[Any] = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"] lowercase__: Optional[Any] = {} lowercase__: Optional[Any] = {} for i, value in enumerate(__lowerCamelCase ): lowercase__: Any = i lowercase__: Optional[Any] = i lowercase__: Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) lowercase__: Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_shape_file'''] ) lowercase__: Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_pronunciation_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) with open(self.word_shape_file , '''w''' , encoding='''utf-8''' ) as word_shape_writer: json.dump(__lowerCamelCase , __lowerCamelCase , ensure_ascii=__lowerCamelCase ) with open(self.word_pronunciation_file , '''w''' , encoding='''utf-8''' ) as word_pronunciation_writer: json.dump(__lowerCamelCase , __lowerCamelCase , ensure_ascii=__lowerCamelCase ) def _snake_case ( self ): lowercase__: List[Any] = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file ) lowercase__: Optional[Any] = tokenizer.tokenize('''你好[SEP]你是谁''' ) self.assertListEqual(__lowerCamelCase , ['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [5, 6, 2, 5, 7, 8] ) self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(__lowerCamelCase ) , [5, 6, 2, 5, 7, 8] ) self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(__lowerCamelCase ) , [5, 6, 2, 5, 7, 8] ) def _snake_case ( self ): lowercase__: Tuple = RoCBertBasicTokenizer() self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] ) def _snake_case ( self ): lowercase__: Dict = RoCBertBasicTokenizer(do_lower_case=__lowerCamelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def _snake_case ( self ): lowercase__: List[Any] = RoCBertBasicTokenizer(do_lower_case=__lowerCamelCase , strip_accents=__lowerCamelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] ) def _snake_case ( self ): lowercase__: List[str] = RoCBertBasicTokenizer(do_lower_case=__lowerCamelCase , strip_accents=__lowerCamelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? 
''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def _snake_case ( self ): lowercase__: int = RoCBertBasicTokenizer(do_lower_case=__lowerCamelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def _snake_case ( self ): lowercase__: Optional[Any] = RoCBertBasicTokenizer(do_lower_case=__lowerCamelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def _snake_case ( self ): lowercase__: Tuple = RoCBertBasicTokenizer(do_lower_case=__lowerCamelCase , strip_accents=__lowerCamelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def _snake_case ( self ): lowercase__: Optional[Any] = RoCBertBasicTokenizer(do_lower_case=__lowerCamelCase , strip_accents=__lowerCamelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def _snake_case ( self ): lowercase__: Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=__lowerCamelCase , never_split=['''[UNK]'''] ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] ) def _snake_case ( self ): lowercase__: Union[str, Any] = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] lowercase__: Optional[int] = {} for i, token in enumerate(__lowerCamelCase ): lowercase__: List[str] = i lowercase__: Union[str, Any] = RoCBertWordpieceTokenizer(vocab=__lowerCamelCase , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] ) def _snake_case ( self ): self.assertTrue(_is_whitespace(''' ''' ) ) self.assertTrue(_is_whitespace('''\t''' ) ) self.assertTrue(_is_whitespace('''\r''' ) ) self.assertTrue(_is_whitespace('''\n''' ) ) self.assertTrue(_is_whitespace('''\u00A0''' ) ) self.assertFalse(_is_whitespace('''A''' ) ) self.assertFalse(_is_whitespace('''-''' ) ) def _snake_case ( self ): self.assertTrue(_is_control('''\u0005''' ) ) self.assertFalse(_is_control('''A''' ) ) self.assertFalse(_is_control(''' ''' ) ) self.assertFalse(_is_control('''\t''' ) ) self.assertFalse(_is_control('''\r''' ) ) def _snake_case ( self ): self.assertTrue(_is_punctuation('''-''' ) ) self.assertTrue(_is_punctuation('''$''' ) ) self.assertTrue(_is_punctuation('''`''' ) ) self.assertTrue(_is_punctuation('''.''' ) ) self.assertFalse(_is_punctuation('''A''' ) ) self.assertFalse(_is_punctuation(''' ''' ) ) def _snake_case ( self ): lowercase__: Dict = self.get_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(__lowerCamelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) if self.test_rust_tokenizer: lowercase__: List[Any] = self.get_rust_tokenizer() self.assertListEqual( [rust_tokenizer.tokenize(__lowerCamelCase ) for 
t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) def _snake_case ( self ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): lowercase__: Union[str, Any] = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase ) lowercase__: int = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence.""" lowercase__: Optional[int] = tokenizer_r.encode_plus( __lowerCamelCase , return_attention_mask=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase , ) lowercase__: Tuple = tokenizer_r.do_lower_case if hasattr(__lowerCamelCase , '''do_lower_case''' ) else False lowercase__: Tuple = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "A"), ((1, 2), ","), ((3, 5), "na"), ((5, 6), "##ï"), ((6, 8), "##ve"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "Allen"), ((21, 23), "##NL"), ((23, 24), "##P"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "a"), ((1, 2), ","), ((3, 8), "naive"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "allen"), ((21, 23), "##nl"), ((23, 24), "##p"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) ) self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] ) def _snake_case ( self ): lowercase__: Tuple = ["的", "人", "有"] lowercase__: int = "".join(__lowerCamelCase ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): lowercase__: Union[str, Any] = True lowercase__: Tuple = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase ) lowercase__: str = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase ) lowercase__: List[Any] = tokenizer_p.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) lowercase__: Tuple = tokenizer_r.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) lowercase__: Union[str, Any] = tokenizer_r.convert_ids_to_tokens(__lowerCamelCase ) lowercase__: Dict = tokenizer_p.convert_ids_to_tokens(__lowerCamelCase ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) lowercase__: Union[str, Any] = False lowercase__: List[Any] = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase ) lowercase__: str = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase ) lowercase__: Dict = tokenizer_r.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) lowercase__: Optional[Any] = tokenizer_p.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) lowercase__: Dict = tokenizer_r.convert_ids_to_tokens(__lowerCamelCase ) lowercase__: int = tokenizer_p.convert_ids_to_tokens(__lowerCamelCase ) # it is expected that only the first Chinese character is not preceded by "##". 
lowercase__: Tuple = [ F"""##{token}""" if idx != 0 else token for idx, token in enumerate(__lowerCamelCase ) ] self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) @slow def _snake_case ( self ): lowercase__: Optional[int] = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file ) lowercase__: Optional[int] = tokenizer.encode('''你好''' , add_special_tokens=__lowerCamelCase ) lowercase__: List[Any] = tokenizer.encode('''你是谁''' , add_special_tokens=__lowerCamelCase ) lowercase__: Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase ) lowercase__: int = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase ) assert encoded_sentence == [1] + text + [2] assert encoded_pair == [1] + text + [2] + text_a + [2] def _snake_case ( self ): lowercase__: Tuple = self.get_tokenizers(do_lower_case=__lowerCamelCase ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): lowercase__: Any = "你好,你是谁" lowercase__: Dict = tokenizer.tokenize(__lowerCamelCase ) lowercase__: Any = tokenizer.convert_tokens_to_ids(__lowerCamelCase ) lowercase__: Optional[Any] = tokenizer.convert_tokens_to_shape_ids(__lowerCamelCase ) lowercase__: str = tokenizer.convert_tokens_to_pronunciation_ids(__lowerCamelCase ) lowercase__: Tuple = tokenizer.prepare_for_model( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , add_special_tokens=__lowerCamelCase ) lowercase__: List[Any] = tokenizer.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertEqual(__lowerCamelCase , __lowerCamelCase )
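# Usage sketch: driving RoCBertTokenizer directly, mirroring the construction used in setUp
# above (a plain vocab file plus the word-shape and word-pronunciation JSON maps). The three
# file paths are placeholders for locally prepared files.
from transformers import RoCBertTokenizer

roc_tokenizer = RoCBertTokenizer("vocab.txt", "word_shape.json", "word_pronunciation.json")
roc_tokens = roc_tokenizer.tokenize("你好[SEP]你是谁")
print(roc_tokens, roc_tokenizer.convert_tokens_to_ids(roc_tokens))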
177
def trapezoidal_rule(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    # yield the interior sample points a + h, a + 2h, ... strictly inside (a, b)
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = trapezoidal_rule(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
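# Quick check: the exact integral of f(x) = x**2 over [0, 1] is 1/3, and the trapezoidal
# approximation above moves toward that value as the number of steps grows.
for n_steps in (10, 100, 1000):
    approx = trapezoidal_rule([0.0, 1.0], n_steps)
    print(n_steps, approx, abs(approx - 1 / 3))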
11
0
from __future__ import annotations

from PIL import Image

# Define glider example
GLIDER = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]

# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]


def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)

        next_generation.append(next_generation_row)
    return next_generation


def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image and advance one generation
        images.append(img)
        cells = new_generation(cells)
    return images


if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
    images[0].save("out.gif", save_all=True, append_images=images[1:])
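# Quick check: the blinker is a period-2 oscillator, so two applications of new_generation
# bring it back to its starting configuration.
first_step = new_generation(BLINKER)
second_step = new_generation(first_step)
print(first_step)              # [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
print(second_step == BLINKER)  # True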
15
import copy import tempfile import unittest from huggingface_hub import HfFolder, delete_repo from parameterized import parameterized from requests.exceptions import HTTPError from transformers import AutoConfig, GenerationConfig from transformers.testing_utils import TOKEN, USER, is_staging_test class lowerCAmelCase__ ( unittest.TestCase): '''simple docstring''' @parameterized.expand([(None,), ("foo.json",)]) def _lowerCamelCase ( self , __lowerCamelCase) -> List[str]: _A : str = GenerationConfig( do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(__lowerCamelCase , config_name=__lowerCamelCase) _A : Tuple = GenerationConfig.from_pretrained(__lowerCamelCase , config_name=__lowerCamelCase) # Checks parameters that were specified self.assertEqual(loaded_config.do_sample , __lowerCamelCase) self.assertEqual(loaded_config.temperature , 0.7) self.assertEqual(loaded_config.length_penalty , 1.0) self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]]) # Checks parameters that were not specified (defaults) self.assertEqual(loaded_config.top_k , 5_0) self.assertEqual(loaded_config.max_length , 2_0) self.assertEqual(loaded_config.max_time , __lowerCamelCase) def _lowerCamelCase ( self) -> Optional[int]: _A : Optional[int] = AutoConfig.from_pretrained("gpt2") _A : int = GenerationConfig.from_model_config(__lowerCamelCase) _A : List[Any] = GenerationConfig() # The generation config has loaded a few non-default parameters from the model config self.assertNotEqual(__lowerCamelCase , __lowerCamelCase) # One of those parameters is eos_token_id -- check if it matches self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id) self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id) def _lowerCamelCase ( self) -> Optional[Any]: _A : Optional[Any] = GenerationConfig() _A : List[Any] = { "max_new_tokens": 1_0_2_4, "foo": "bar", } _A : List[str] = copy.deepcopy(__lowerCamelCase) _A : int = generation_config.update(**__lowerCamelCase) # update_kwargs was not modified (no side effects) self.assertEqual(__lowerCamelCase , __lowerCamelCase) # update_kwargs was used to update the config on valid attributes self.assertEqual(generation_config.max_new_tokens , 1_0_2_4) # `.update()` returns a dictionary of unused kwargs self.assertEqual(__lowerCamelCase , {"foo": "bar"}) def _lowerCamelCase ( self) -> Any: _A : int = GenerationConfig() _A : int = "bar" with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(__lowerCamelCase) _A : Any = GenerationConfig.from_pretrained(__lowerCamelCase) # update_kwargs was used to update the config on valid attributes self.assertEqual(new_config.foo , "bar") _A : Optional[Any] = GenerationConfig.from_model_config(__lowerCamelCase) assert not hasattr(__lowerCamelCase , "foo") # no new kwargs should be initialized if from config def _lowerCamelCase ( self) -> List[str]: _A : Union[str, Any] = GenerationConfig() self.assertEqual(default_config.temperature , 1.0) self.assertEqual(default_config.do_sample , __lowerCamelCase) self.assertEqual(default_config.num_beams , 1) _A : Optional[int] = GenerationConfig( do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) self.assertEqual(config.temperature , 0.7) self.assertEqual(config.do_sample , __lowerCamelCase) 
self.assertEqual(config.num_beams , 1) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(__lowerCamelCase) _A : Optional[int] = GenerationConfig.from_pretrained(__lowerCamelCase , temperature=1.0) self.assertEqual(loaded_config.temperature , 1.0) self.assertEqual(loaded_config.do_sample , __lowerCamelCase) self.assertEqual(loaded_config.num_beams , 1) # default value @is_staging_test class lowerCAmelCase__ ( unittest.TestCase): '''simple docstring''' @classmethod def _lowerCamelCase ( cls) -> Optional[int]: _A : Dict = TOKEN HfFolder.save_token(__lowerCamelCase) @classmethod def _lowerCamelCase ( cls) -> List[Any]: try: delete_repo(token=cls._token , repo_id="test-generation-config") except HTTPError: pass try: delete_repo(token=cls._token , repo_id="valid_org/test-generation-config-org") except HTTPError: pass def _lowerCamelCase ( self) -> Any: _A : Optional[int] = GenerationConfig( do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub("test-generation-config" , use_auth_token=self._token) _A : Union[str, Any] = GenerationConfig.from_pretrained(F"{USER}/test-generation-config") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase)) # Reset repo delete_repo(token=self._token , repo_id="test-generation-config") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( __lowerCamelCase , repo_id="test-generation-config" , push_to_hub=__lowerCamelCase , use_auth_token=self._token) _A : Optional[Any] = GenerationConfig.from_pretrained(F"{USER}/test-generation-config") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase)) def _lowerCamelCase ( self) -> Union[str, Any]: _A : Union[str, Any] = GenerationConfig( do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub("valid_org/test-generation-config-org" , use_auth_token=self._token) _A : int = GenerationConfig.from_pretrained("valid_org/test-generation-config-org") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase)) # Reset repo delete_repo(token=self._token , repo_id="valid_org/test-generation-config-org") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( __lowerCamelCase , repo_id="valid_org/test-generation-config-org" , push_to_hub=__lowerCamelCase , use_auth_token=self._token) _A : Optional[int] = GenerationConfig.from_pretrained("valid_org/test-generation-config-org") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
style_context_codestyle: 11
label: 0
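The test sample above round-trips a GenerationConfig through save_pretrained/from_pretrained with a custom config_name. A minimal sketch of that round-trip outside the test harness (not part of the dataset row; it assumes `transformers` is installed and uses an arbitrary file name):

```python
# Hedged sketch of the save/load round-trip checked in the sample above.
# "my_generation_config.json" is an arbitrary file name, not one from the row.
import tempfile

from transformers import GenerationConfig

config = GenerationConfig(do_sample=True, temperature=0.7, length_penalty=1.0)

with tempfile.TemporaryDirectory() as tmp_dir:
    config.save_pretrained(tmp_dir, config_name="my_generation_config.json")
    reloaded = GenerationConfig.from_pretrained(tmp_dir, config_name="my_generation_config.json")

assert reloaded.temperature == 0.7  # value set explicitly above
assert reloaded.max_length == 20    # library default, as the test also asserts
```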
import argparse import glob import logging import os import time from argparse import Namespace import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from torch.utils.data import DataLoader, TensorDataset from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes, glue_tasks_num_labels from transformers import glue_processors as processors lowerCamelCase__ = logging.getLogger(__name__) class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): __lowerCamelCase : Tuple ='sequence-classification' def __init__( self : List[str] , __lowercase : Any ): '''simple docstring''' if type(__lowerCamelCase ) == dict: __a = Namespace(**__lowerCamelCase ) __a = glue_output_modes[hparams.task] __a = glue_tasks_num_labels[hparams.task] super().__init__(__lowerCamelCase , __lowerCamelCase , self.mode ) def UpperCamelCase_ ( self : List[Any] , **__lowercase : Tuple ): '''simple docstring''' return self.model(**__lowerCamelCase ) def UpperCamelCase_ ( self : Optional[Any] , __lowercase : List[Any] , __lowercase : List[str] ): '''simple docstring''' __a = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: __a = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None __a = self(**__lowerCamelCase ) __a = outputs[0] __a = self.trainer.lr_schedulers[0]["scheduler"] __a = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' __a = self.hparams __a = processors[args.task]() __a = processor.get_labels() for mode in ["train", "dev"]: __a = self._feature_file(__lowerCamelCase ) if os.path.exists(__lowerCamelCase ) and not args.overwrite_cache: logger.info("""Loading features from cached file %s""" , __lowerCamelCase ) else: logger.info("""Creating features from dataset file at %s""" , args.data_dir ) __a = ( processor.get_dev_examples(args.data_dir ) if mode == "dev" else processor.get_train_examples(args.data_dir ) ) __a = convert_examples_to_features( __lowerCamelCase , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , ) logger.info("""Saving features into cached file %s""" , __lowerCamelCase ) torch.save(__lowerCamelCase , __lowerCamelCase ) def UpperCamelCase_ ( self : Union[str, Any] , __lowercase : Any , __lowercase : Optional[int] , __lowercase : Optional[Any] = False ): '''simple docstring''' __a = "dev" if mode == "test" else mode __a = self._feature_file(__lowerCamelCase ) logger.info("""Loading features from cached file %s""" , __lowerCamelCase ) __a = torch.load(__lowerCamelCase ) __a = torch.tensor([f.input_ids for f in features] , dtype=torch.long ) __a = torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) __a = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) if self.hparams.glue_output_mode == "classification": __a = torch.tensor([f.label for f in features] , dtype=torch.long ) elif self.hparams.glue_output_mode == "regression": __a = torch.tensor([f.label for f in features] , dtype=torch.float ) return DataLoader( TensorDataset(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , batch_size=__lowerCamelCase , shuffle=__lowerCamelCase , ) def UpperCamelCase_ ( self : Optional[Any] , 
__lowercase : str , __lowercase : Any ): '''simple docstring''' __a = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: __a = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None __a = self(**__lowerCamelCase ) __a = outputs[:2] __a = logits.detach().cpu().numpy() __a = inputs["labels"].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def UpperCamelCase_ ( self : Optional[int] , __lowercase : Any ): '''simple docstring''' __a = torch.stack([x["""val_loss"""] for x in outputs] ).mean().detach().cpu().item() __a = np.concatenate([x["""pred"""] for x in outputs] , axis=0 ) if self.hparams.glue_output_mode == "classification": __a = np.argmax(__lowerCamelCase , axis=1 ) elif self.hparams.glue_output_mode == "regression": __a = np.squeeze(__lowerCamelCase ) __a = np.concatenate([x["""target"""] for x in outputs] , axis=0 ) __a = [[] for _ in range(out_label_ids.shape[0] )] __a = [[] for _ in range(out_label_ids.shape[0] )] __a = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , __lowerCamelCase , __lowerCamelCase )} __a = dict(results.items() ) __a = results return ret, preds_list, out_label_list def UpperCamelCase_ ( self : List[str] , __lowercase : Any ): '''simple docstring''' __a = self._eval_end(__lowerCamelCase ) __a = ret["log"] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def UpperCamelCase_ ( self : int , __lowercase : Dict ): '''simple docstring''' __a = self._eval_end(__lowerCamelCase ) __a = ret["log"] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def UpperCamelCase_ ( __lowercase : str , __lowercase : Optional[Any] ): '''simple docstring''' BaseTransformer.add_model_specific_args(__lowerCamelCase , __lowerCamelCase ) parser.add_argument( """--max_seq_length""" , default=128 , type=__lowerCamelCase , help=( """The maximum total input sequence length after tokenization. 
Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--task""" , default="""""" , type=__lowerCamelCase , required=__lowerCamelCase , help="""The GLUE task to run""" , ) parser.add_argument( """--gpus""" , default=0 , type=__lowerCamelCase , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , ) parser.add_argument( """--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" ) return parser def lowerCAmelCase__ ( ): """simple docstring""" __a = argparse.ArgumentParser() add_generic_args(UpperCamelCase__ , os.getcwd() ) __a = GLUETransformer.add_model_specific_args(UpperCamelCase__ , os.getcwd() ) __a = parser.parse_args() # If output_dir not provided, a folder will be generated in pwd if args.output_dir is None: __a = os.path.join( """./results""" , f"{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}" , ) os.makedirs(args.output_dir ) __a = GLUETransformer(UpperCamelCase__ ) __a = generic_train(UpperCamelCase__ , UpperCamelCase__ ) # Optionally, predict on dev set and write to output_dir if args.do_predict: __a = sorted(glob.glob(os.path.join(args.output_dir , """checkpoint-epoch=*.ckpt""" ) , recursive=UpperCamelCase__ ) ) __a = model.load_from_checkpoint(checkpoints[-1] ) return trainer.test(UpperCamelCase__ ) if __name__ == "__main__": main()
code_codestyle: 302
import pickle import numpy as np from matplotlib import pyplot as plt class lowerCAmelCase__ : '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=0.2 , __lowerCamelCase=0.2) -> str: _A : Optional[int] = bp_numa _A : Dict = bp_numa _A : Tuple = bp_numa _A : List[str] = conva_get[:2] _A : Tuple = conva_get[2] _A : Optional[int] = size_pa _A : Optional[Any] = rate_w _A : Optional[Any] = rate_t _A : Union[str, Any] = [ np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0]) + 0.5) for i in range(self.conva[1]) ] _A : int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5) _A : Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5) _A : Any = -2 * np.random.rand(self.conva[1]) + 1 _A : Optional[int] = -2 * np.random.rand(self.num_bpa) + 1 _A : Optional[Any] = -2 * np.random.rand(self.num_bpa) + 1 def _lowerCamelCase ( self , __lowerCamelCase) -> Dict: # save model dict with pickle _A : Dict = { "num_bp1": self.num_bpa, "num_bp2": self.num_bpa, "num_bp3": self.num_bpa, "conv1": self.conva, "step_conv1": self.step_conva, "size_pooling1": self.size_poolinga, "rate_weight": self.rate_weight, "rate_thre": self.rate_thre, "w_conv1": self.w_conva, "wkj": self.wkj, "vji": self.vji, "thre_conv1": self.thre_conva, "thre_bp2": self.thre_bpa, "thre_bp3": self.thre_bpa, } with open(__lowerCamelCase , "wb") as f: pickle.dump(__lowerCamelCase , __lowerCamelCase) print(F"Model saved: {save_path}") @classmethod def _lowerCamelCase ( cls , __lowerCamelCase) -> Any: # read saved model with open(__lowerCamelCase , "rb") as f: _A : Any = pickle.load(__lowerCamelCase) # noqa: S301 _A : Optional[int] = model_dic.get("conv1") conv_get.append(model_dic.get("step_conv1")) _A : str = model_dic.get("size_pooling1") _A : List[str] = model_dic.get("num_bp1") _A : Union[str, Any] = model_dic.get("num_bp2") _A : List[Any] = model_dic.get("num_bp3") _A : Dict = model_dic.get("rate_weight") _A : List[Any] = model_dic.get("rate_thre") # create model instance _A : str = CNN(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) # modify model parameter _A : List[Any] = model_dic.get("w_conv1") _A : Union[str, Any] = model_dic.get("wkj") _A : str = model_dic.get("vji") _A : List[str] = model_dic.get("thre_conv1") _A : Optional[Any] = model_dic.get("thre_bp2") _A : Dict = model_dic.get("thre_bp3") return conv_ins def _lowerCamelCase ( self , __lowerCamelCase) -> Dict: return 1 / (1 + np.exp(-1 * x)) def _lowerCamelCase ( self , __lowerCamelCase) -> Union[str, Any]: return round(__lowerCamelCase , 3) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Union[str, Any]: # convolution process _A : Tuple = convs[0] _A : Union[str, Any] = convs[1] _A : List[Any] = np.shape(__lowerCamelCase)[0] # get the data slice of original image data, data_focus _A : Tuple = [] for i_focus in range(0 , size_data - size_conv + 1 , __lowerCamelCase): for j_focus in range(0 , size_data - size_conv + 1 , __lowerCamelCase): _A : Optional[int] = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(__lowerCamelCase) # calculate the feature map of every single kernel, and saved as list of matrix _A : Optional[Any] = [] _A : Optional[int] = int((size_data - size_conv) / conv_step + 1) for i_map in range(__lowerCamelCase): _A : Optional[int] = [] 
for i_focus in range(len(__lowerCamelCase)): _A : Any = ( np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map])) - thre_convs[i_map] ) featuremap.append(self.sig(__lowerCamelCase)) _A : Optional[Any] = np.asmatrix(__lowerCamelCase).reshape( __lowerCamelCase , __lowerCamelCase) data_featuremap.append(__lowerCamelCase) # expanding the data slice to One dimenssion _A : Optional[Any] = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(__lowerCamelCase)) _A : Dict = np.asarray(__lowerCamelCase) return focus_list, data_featuremap def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase="average_pool") -> Dict: # pooling process _A : Optional[Any] = len(featuremaps[0]) _A : str = int(size_map / size_pooling) _A : Optional[int] = [] for i_map in range(len(__lowerCamelCase)): _A : int = featuremaps[i_map] _A : Optional[int] = [] for i_focus in range(0 , __lowerCamelCase , __lowerCamelCase): for j_focus in range(0 , __lowerCamelCase , __lowerCamelCase): _A : str = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(__lowerCamelCase)) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(__lowerCamelCase)) _A : Tuple = np.asmatrix(__lowerCamelCase).reshape(__lowerCamelCase , __lowerCamelCase) featuremap_pooled.append(__lowerCamelCase) return featuremap_pooled def _lowerCamelCase ( self , __lowerCamelCase) -> Tuple: # expanding three dimension data to one dimension list _A : Tuple = [] for i in range(len(__lowerCamelCase)): _A : Union[str, Any] = np.shape(data[i]) _A : List[Any] = data[i].reshape(1 , shapes[0] * shapes[1]) _A : Optional[Any] = data_listed.getA().tolist()[0] data_expanded.extend(__lowerCamelCase) _A : Optional[Any] = np.asarray(__lowerCamelCase) return data_expanded def _lowerCamelCase ( self , __lowerCamelCase) -> Union[str, Any]: # expanding matrix to one dimension list _A : List[Any] = np.asarray(__lowerCamelCase) _A : Union[str, Any] = np.shape(__lowerCamelCase) _A : Dict = data_mat.reshape(1 , shapes[0] * shapes[1]) return data_expanded def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Optional[int]: _A : Dict = [] _A : Any = 0 for i_map in range(__lowerCamelCase): _A : Union[str, Any] = np.ones((size_map, size_map)) for i in range(0 , __lowerCamelCase , __lowerCamelCase): for j in range(0 , __lowerCamelCase , __lowerCamelCase): _A : List[Any] = pd_pool[ i_pool ] _A : Tuple = i_pool + 1 _A : Optional[Any] = np.multiply( __lowerCamelCase , np.multiply(out_map[i_map] , (1 - out_map[i_map]))) pd_all.append(__lowerCamelCase) return pd_all def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=bool) -> Union[str, Any]: # model traning print("----------------------Start Training-------------------------") print((" - - Shape: Train_Data ", np.shape(__lowerCamelCase))) print((" - - Shape: Teach_Data ", np.shape(__lowerCamelCase))) _A : Tuple = 0 _A : Dict = [] _A : Optional[Any] = 1_0_0_0_0 while rp < n_repeat and mse >= error_accuracy: _A : Union[str, Any] = 0 print(F"-------------Learning Time {rp}--------------") for p in range(len(__lowerCamelCase)): # print('------------Learning Image: %d--------------'%p) _A : str = np.asmatrix(datas_train[p]) _A : Union[str, Any] = np.asarray(datas_teach[p]) _A , _A : Any = self.convolute( __lowerCamelCase , 
self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) _A : Optional[Any] = self.pooling(__lowerCamelCase , self.size_poolinga) _A : Optional[int] = np.shape(__lowerCamelCase) _A : List[str] = self._expand(__lowerCamelCase) _A : Tuple = data_bp_input _A : int = np.dot(__lowerCamelCase , self.vji.T) - self.thre_bpa _A : List[Any] = self.sig(__lowerCamelCase) _A : Union[str, Any] = np.dot(__lowerCamelCase , self.wkj.T) - self.thre_bpa _A : List[str] = self.sig(__lowerCamelCase) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- _A : int = np.multiply( (data_teach - bp_outa) , np.multiply(__lowerCamelCase , (1 - bp_outa))) _A : Optional[Any] = np.multiply( np.dot(__lowerCamelCase , self.wkj) , np.multiply(__lowerCamelCase , (1 - bp_outa))) _A : Union[str, Any] = np.dot(__lowerCamelCase , self.vji) _A : Any = pd_i_all / (self.size_poolinga * self.size_poolinga) _A : Dict = pd_conva_pooled.T.getA().tolist() _A : Optional[Any] = self._calculate_gradient_from_pool( __lowerCamelCase , __lowerCamelCase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , ) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1]): _A : int = self._expand_mat(pd_conva_all[k_conv]) _A : Optional[int] = self.rate_weight * np.dot(__lowerCamelCase , __lowerCamelCase) _A : List[Any] = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0])) _A : Any = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv]) * self.rate_thre ) # all connected layer _A : Tuple = self.wkj + pd_k_all.T * bp_outa * self.rate_weight _A : int = self.vji + pd_j_all.T * bp_outa * self.rate_weight _A : Tuple = self.thre_bpa - pd_k_all * self.rate_thre _A : List[str] = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image _A : Optional[int] = np.sum(abs(data_teach - bp_outa)) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) _A : Any = rp + 1 _A : Dict = error_count / patterns all_mse.append(__lowerCamelCase) def draw_error(): _A : Optional[int] = [error_accuracy for i in range(int(n_repeat * 1.2))] plt.plot(__lowerCamelCase , "+-") plt.plot(__lowerCamelCase , "r--") plt.xlabel("Learning Times") plt.ylabel("All_mse") plt.grid(__lowerCamelCase , alpha=0.5) plt.show() print("------------------Training Complished---------------------") print((" - - Training epoch: ", rp, F" - - Mse: {mse:.6f}")) if draw_e: draw_error() return mse def _lowerCamelCase ( self , __lowerCamelCase) -> int: # model predict _A : Union[str, Any] = [] print("-------------------Start Testing-------------------------") print((" - - Shape: Test_Data ", np.shape(__lowerCamelCase))) for p in range(len(__lowerCamelCase)): _A : int = np.asmatrix(datas_test[p]) _A , _A : List[Any] = self.convolute( __lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) _A : str = self.pooling(__lowerCamelCase , self.size_poolinga) _A : Optional[int] = self._expand(__lowerCamelCase) _A : List[Any] = data_bp_input _A : Optional[int] = bp_outa * self.vji.T - self.thre_bpa _A : int = self.sig(__lowerCamelCase) _A : int = bp_outa * self.wkj.T - self.thre_bpa _A : Optional[int] = self.sig(__lowerCamelCase) produce_out.extend(bp_outa.getA().tolist()) _A : int = [list(map(self.do_round , __lowerCamelCase)) for each in produce_out] return np.asarray(__lowerCamelCase) def _lowerCamelCase ( self , __lowerCamelCase) -> Dict: # return the data of image 
after convoluting process so we can check it out _A : Optional[int] = np.asmatrix(__lowerCamelCase) _A , _A : Tuple = self.convolute( __lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) _A : Union[str, Any] = self.pooling(__lowerCamelCase , self.size_poolinga) return data_conveda, data_pooleda if __name__ == "__main__": pass
style_context_codestyle: 11
label: 0
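The CNN-from-scratch sample in this row implements its own pooling pass. A readable NumPy-only sketch of that step (not part of the row; it assumes the feature-map side length divides evenly by the window size):

```python
import numpy as np

def pool(feature_map: np.ndarray, size_pooling: int, mode: str = "average_pool") -> np.ndarray:
    # Slide a non-overlapping size_pooling x size_pooling window over the map
    # and reduce it with an average (default) or a max, mirroring the sample above.
    side = feature_map.shape[0] // size_pooling
    out = np.empty((side, side))
    for i in range(side):
        for j in range(side):
            window = feature_map[
                i * size_pooling : (i + 1) * size_pooling,
                j * size_pooling : (j + 1) * size_pooling,
            ]
            out[i, j] = np.average(window) if mode == "average_pool" else np.max(window)
    return out

print(pool(np.arange(16.0).reshape(4, 4), 2))                 # 2x2 averaged map
print(pool(np.arange(16.0).reshape(4, 4), 2, "max_pooling"))  # 2x2 max map
```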
"""simple docstring""" import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class lowercase__ ( snake_case__ ): _UpperCAmelCase :Union[str, Any] = ["image_processor", "tokenizer"] _UpperCAmelCase :str = "LayoutLMv3ImageProcessor" _UpperCAmelCase :Optional[int] = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast") def __init__( self : Union[str, Any] , snake_case__ : List[Any]=None , snake_case__ : Optional[Any]=None , **snake_case__ : int ): lowerCamelCase_ : Optional[Any] =None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , __lowerCamelCase , ) lowerCamelCase_ : List[str] =kwargs.pop("feature_extractor" ) lowerCamelCase_ : Union[str, Any] =image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(__lowerCamelCase , __lowerCamelCase ) def __call__( self : Optional[int] , snake_case__ : Any , snake_case__ : Optional[Any] = None , snake_case__ : Any = None , snake_case__ : Optional[Any] = None , snake_case__ : Union[str, Any] = None , snake_case__ : Tuple = True , snake_case__ : List[Any] = False , snake_case__ : Optional[int] = None , snake_case__ : Dict = None , snake_case__ : List[str] = 0 , snake_case__ : Any = None , snake_case__ : Optional[int] = None , snake_case__ : Union[str, Any] = None , snake_case__ : List[Any] = False , snake_case__ : Tuple = False , snake_case__ : str = False , snake_case__ : Optional[int] = False , snake_case__ : str = True , snake_case__ : List[Any] = None , **snake_case__ : Dict , ): # verify input if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True." ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( "You cannot provide word labels if you initialized the image processor with apply_ocr set to True." 
) # first, apply the image processor lowerCamelCase_ : List[Any] =self.image_processor(images=__lowerCamelCase , return_tensors=__lowerCamelCase ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(__lowerCamelCase , __lowerCamelCase ): lowerCamelCase_ : Dict =[text] # add batch dimension (as the image processor always adds a batch dimension) lowerCamelCase_ : Tuple =features["words"] lowerCamelCase_ : List[str] =self.tokenizer( text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , ) # add pixel values lowerCamelCase_ : Dict =features.pop("pixel_values" ) if return_overflowing_tokens is True: lowerCamelCase_ : int =self.get_overflowing_images(__lowerCamelCase , encoded_inputs["overflow_to_sample_mapping"] ) lowerCamelCase_ : int =images return encoded_inputs def UpperCAmelCase__ ( self : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[str] ): # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image lowerCamelCase_ : Optional[Any] =[] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(__lowerCamelCase ) != len(__lowerCamelCase ): raise ValueError( "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got" F""" {len(__lowerCamelCase )} and {len(__lowerCamelCase )}""" ) return images_with_overflow def UpperCAmelCase__ ( self : Dict , *snake_case__ : List[Any] , **snake_case__ : List[Any] ): return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase ) def UpperCAmelCase__ ( self : Optional[Any] , *snake_case__ : Optional[Any] , **snake_case__ : Optional[int] ): return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase ) @property def UpperCAmelCase__ ( self : List[str] ): return ["input_ids", "bbox", "attention_mask", "pixel_values"] @property def UpperCAmelCase__ ( self : Union[str, Any] ): warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __lowerCamelCase , ) return self.image_processor_class @property def UpperCAmelCase__ ( self : Tuple ): warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __lowerCamelCase , ) return self.image_processor
code_codestyle: 144
import re from flax.core.frozen_dict import freeze from flax.traverse_util import flatten_dict, unflatten_dict from jax.experimental import PartitionSpec as P # Sentinels lowerCAmelCase__ = object() # For specifying empty leaf dict `{}` lowerCAmelCase__ = object() def _UpperCAmelCase (UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] ): _A : str = tuple((re.compile(x + "$" ) for x in qs) ) for i in range(len(UpperCamelCase__ ) - len(UpperCamelCase__ ) + 1 ): _A : Tuple = [x.match(UpperCamelCase__ ) for x, y in zip(UpperCamelCase__ , ks[i:] )] if matches and all(UpperCamelCase__ ): return True return False def _UpperCAmelCase (UpperCamelCase__ : str ): def replace(UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int] ): for rule, replacement in rules: if _match(UpperCamelCase__ , UpperCamelCase__ ): return replacement return val return replace def _UpperCAmelCase (): return [ # embeddings (("transformer", "wpe", "embedding"), P("mp" , UpperCamelCase__ )), (("transformer", "wte", "embedding"), P("mp" , UpperCamelCase__ )), # atention (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(UpperCamelCase__ , "mp" )), (("attention", "out_proj", "kernel"), P("mp" , UpperCamelCase__ )), (("attention", "out_proj", "bias"), None), # mlp (("mlp", "c_fc", "kernel"), P(UpperCamelCase__ , "mp" )), (("mlp", "c_fc", "bias"), P("mp" )), (("mlp", "c_proj", "kernel"), P("mp" , UpperCamelCase__ )), (("mlp", "c_proj", "bias"), None), # layer norms ((r"ln_\d+", "bias"), None), ((r"\d+", r"ln_\d+", "scale"), None), (("ln_f", "bias"), None), (("ln_f", "scale"), None), ] def _UpperCAmelCase (UpperCamelCase__ : List[str] ): _A : int = _get_partition_rules() _A : Optional[int] = _replacement_rules(UpperCamelCase__ ) _A : Optional[int] = {k: _unmatched for k in flatten_dict(UpperCamelCase__ )} _A : List[str] = {k: replace(UpperCamelCase__ , UpperCamelCase__ ) for k, v in initd.items()} assert _unmatched not in result.values(), "Incomplete partition spec." return freeze(unflatten_dict(UpperCamelCase__ ) )
style_context_codestyle: 11
label: 0
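The JAX partitioning sample in this row keys its sharding rules on tuples of regular expressions. A standard-library-only sketch of that matching idea (the parameter path below is an illustrative stand-in, not taken from the row):

```python
import re

def match(qs, ks):
    # A query tuple matches if its patterns line up, in order, with some
    # contiguous window of the key tuple (each pattern anchored with "$").
    patterns = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        window = ks[i : i + len(qs)]
        if all(p.match(k) for p, k in zip(patterns, window)):
            return True
    return False

key = ("transformer", "h", "0", "attention", "q_proj", "kernel")      # illustrative path
print(match(("attention", "(q_proj|k_proj|v_proj)", "kernel"), key))  # True
print(match(("mlp", "c_fc", "kernel"), key))                          # False
```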
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a__: Union[str, Any] = logging.get_logger(__name__) a__: List[str] = { 'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json', } class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ): __SCREAMING_SNAKE_CASE = '''data2vec-text''' def __init__( self,__lowerCamelCase=3_0522,__lowerCamelCase=768,__lowerCamelCase=12,__lowerCamelCase=12,__lowerCamelCase=3072,__lowerCamelCase="gelu",__lowerCamelCase=0.1,__lowerCamelCase=0.1,__lowerCamelCase=512,__lowerCamelCase=2,__lowerCamelCase=0.02,__lowerCamelCase=1E-12,__lowerCamelCase=1,__lowerCamelCase=0,__lowerCamelCase=2,__lowerCamelCase="absolute",__lowerCamelCase=True,__lowerCamelCase=None,**__lowerCamelCase,): super().__init__(pad_token_id=__lowerCamelCase,bos_token_id=__lowerCamelCase,eos_token_id=__lowerCamelCase,**__lowerCamelCase ) A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = hidden_act A__ = intermediate_size A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = type_vocab_size A__ = initializer_range A__ = layer_norm_eps A__ = position_embedding_type A__ = use_cache A__ = classifier_dropout class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ): @property def UpperCamelCase ( self ): if self.task == "multiple-choice": A__ = {0: "batch", 1: "choice", 2: "sequence"} else: A__ = {0: "batch", 1: "sequence"} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
code_codestyle: 193
def _UpperCAmelCase(UpperCamelCase__: str, UpperCamelCase__: bool = False):
    if not isinstance(UpperCamelCase__, UpperCamelCase__):
        _A : Optional[Any] = f"Expected string as input, found {type(UpperCamelCase__)}"
        raise ValueError(UpperCamelCase__)
    if not isinstance(UpperCamelCase__, UpperCamelCase__):
        _A : Union[str, Any] = f"Expected boolean as use_pascal parameter, found {type(UpperCamelCase__)}"
        raise ValueError(UpperCamelCase__)
    _A : int = input_str.split("_")
    _A : str = 0 if use_pascal else 1
    _A : str = words[start_index:]
    _A : Optional[Any] = [word[0].upper() + word[1:] for word in words_to_capitalize]
    _A : Any = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])


if __name__ == "__main__":
    from doctest import testmod

    testmod()
style_context_codestyle: 11
label: 0
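The Data2Vec text sample in this row derives its ONNX input axes from the task name. A dependency-free sketch of that selection logic (not part of the row):

```python
from collections import OrderedDict

def onnx_inputs(task):
    # Multiple-choice inputs carry an extra "choice" axis; every other task
    # only has batch and sequence axes, as in the OnnxConfig sample above.
    if task == "multiple-choice":
        dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        dynamic_axis = {0: "batch", 1: "sequence"}
    return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

print(onnx_inputs("multiple-choice")["input_ids"])          # {0: 'batch', 1: 'choice', 2: 'sequence'}
print(onnx_inputs("sequence-classification")["input_ids"])  # {0: 'batch', 1: 'sequence'}
```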
'''simple docstring''' import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / '''utils''')) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 __SCREAMING_SNAKE_CASE :Optional[int] = get_tests_dir('''fixtures''') class A_ ( unittest.TestCase ): def lowercase ( self : Any ): # A mock response for an HTTP head request to emulate server down _UpperCAmelCase = mock.Mock() _UpperCAmelCase = 5_0_0 _UpperCAmelCase = {} _UpperCAmelCase = HTTPError _UpperCAmelCase = {} # Download this model to make sure it's in the cache. _UpperCAmelCase = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("requests.Session.request" , return_value=__lowerCamelCase ) as mock_head: _UpperCAmelCase = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" ) # This check we did call the fake head request mock_head.assert_called() def lowercase ( self : List[Any] ): # This test is for deprecated behavior and can be removed in v5 _UpperCAmelCase = ViTImageProcessor.from_pretrained( "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json" ) def lowercase ( self : Union[str, Any] ): with self.assertRaises(__lowerCamelCase ): # config is in subfolder, the following should not work without specifying the subfolder _UpperCAmelCase = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants" ) _UpperCAmelCase = AutoImageProcessor.from_pretrained( "hf-internal-testing/stable-diffusion-all-variants" , subfolder="feature_extractor" ) self.assertIsNotNone(__lowerCamelCase ) @is_staging_test class A_ ( unittest.TestCase ): @classmethod def lowercase ( cls : Dict ): _UpperCAmelCase = TOKEN HfFolder.save_token(__lowerCamelCase ) @classmethod def lowercase ( cls : int ): try: delete_repo(token=cls._token , repo_id="test-image-processor" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="valid_org/test-image-processor-org" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="test-dynamic-image-processor" ) except HTTPError: pass def lowercase ( self : Union[str, Any] ): _UpperCAmelCase = ViTImageProcessor.from_pretrained(__lowerCamelCase ) image_processor.push_to_hub("test-image-processor" , use_auth_token=self._token ) _UpperCAmelCase = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' ) for k, v in image_processor.__dict__.items(): self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase ) ) # Reset repo delete_repo(token=self._token , repo_id="test-image-processor" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( __lowerCamelCase , repo_id="test-image-processor" , push_to_hub=__lowerCamelCase , use_auth_token=self._token ) _UpperCAmelCase = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' ) for k, v in image_processor.__dict__.items(): self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase ) ) def lowercase ( self : List[str] ): _UpperCAmelCase = ViTImageProcessor.from_pretrained(__lowerCamelCase ) 
image_processor.push_to_hub("valid_org/test-image-processor" , use_auth_token=self._token ) _UpperCAmelCase = ViTImageProcessor.from_pretrained("valid_org/test-image-processor" ) for k, v in image_processor.__dict__.items(): self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase ) ) # Reset repo delete_repo(token=self._token , repo_id="valid_org/test-image-processor" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( __lowerCamelCase , repo_id="valid_org/test-image-processor-org" , push_to_hub=__lowerCamelCase , use_auth_token=self._token ) _UpperCAmelCase = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org" ) for k, v in image_processor.__dict__.items(): self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase ) ) def lowercase ( self : List[str] ): CustomImageProcessor.register_for_auto_class() _UpperCAmelCase = CustomImageProcessor.from_pretrained(__lowerCamelCase ) image_processor.push_to_hub("test-dynamic-image-processor" , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map , {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"} , ) _UpperCAmelCase = AutoImageProcessor.from_pretrained( f'{USER}/test-dynamic-image-processor' , trust_remote_code=__lowerCamelCase ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__ , "CustomImageProcessor" )
code_codestyle: 22
from __future__ import annotations


def _UpperCAmelCase(UpperCamelCase__: list[int], UpperCamelCase__: list[int], UpperCamelCase__: int):
    _A : Dict = list(range(len(UpperCamelCase__)))
    _A : Any = [v / w for v, w in zip(UpperCamelCase__, UpperCamelCase__)]
    index.sort(key=lambda UpperCamelCase__: ratio[i], reverse=UpperCamelCase__)
    _A : float = 0
    _A : list[float] = [0] * len(UpperCamelCase__)
    for i in index:
        if weight[i] <= capacity:
            _A : Union[str, Any] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            _A : Optional[Any] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
style_context_codestyle: 11
label: 0
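The second file in this row is a greedy fractional-knapsack routine. A de-obfuscated restatement with a worked example (the signature names are assumptions; the row's own function keeps its placeholder names):

```python
def fractional_knapsack(value, weight, capacity):
    # Take items in decreasing value/weight order; the last item taken may be
    # a fraction of its weight, exactly as in the sample above.
    index = sorted(range(len(value)), key=lambda i: value[i] / weight[i], reverse=True)
    max_value, fractions = 0.0, [0.0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1.0
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * fractions[i]
            break
    return max_value, fractions

# Ratios are 6, 5 and 4, so items 0 and 1 are taken whole and half of item 2:
print(fractional_knapsack([6, 10, 16], [1, 2, 4], 5))  # (24.0, [1.0, 1.0, 0.5])
```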
'''simple docstring''' import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html __A ='platform' import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class _snake_case : lowerCAmelCase :List[Any] = PegasusConfig lowerCAmelCase :Any = {} lowerCAmelCase :str = '''gelu''' def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=99 , _lowerCamelCase=32 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=20 , _lowerCamelCase=2 , _lowerCamelCase=1 , _lowerCamelCase=0 , ): UpperCAmelCase__ : Optional[int] = parent UpperCAmelCase__ : str = batch_size UpperCAmelCase__ : Dict = seq_length UpperCAmelCase__ : Any = is_training UpperCAmelCase__ : Any = use_labels UpperCAmelCase__ : Tuple = vocab_size UpperCAmelCase__ : Any = hidden_size UpperCAmelCase__ : Tuple = num_hidden_layers UpperCAmelCase__ : Optional[Any] = num_attention_heads UpperCAmelCase__ : str = intermediate_size UpperCAmelCase__ : int = hidden_dropout_prob UpperCAmelCase__ : List[str] = attention_probs_dropout_prob UpperCAmelCase__ : Any = max_position_embeddings UpperCAmelCase__ : Any = eos_token_id UpperCAmelCase__ : int = pad_token_id UpperCAmelCase__ : Optional[Any] = bos_token_id def snake_case__ ( self): UpperCAmelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size).clip(3 , self.vocab_size) UpperCAmelCase__ : int = np.expand_dims(np.array([self.eos_token_id] * self.batch_size) , 1) UpperCAmelCase__ : List[str] = np.concatenate([input_ids, eos_tensor] , axis=1) UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) UpperCAmelCase__ : List[str] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) UpperCAmelCase__ : Dict = prepare_pegasus_inputs_dict(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase) return config, inputs_dict def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase): UpperCAmelCase__ : List[str] = 20 UpperCAmelCase__ : Optional[Any] = model_class_name(__lowerCamelCase) UpperCAmelCase__ : str = model.encode(inputs_dict["""input_ids"""]) UpperCAmelCase__ : List[Any] = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) UpperCAmelCase__ : Tuple = 
model.init_cache(decoder_input_ids.shape[0] , __lowerCamelCase , __lowerCamelCase) UpperCAmelCase__ : List[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""") UpperCAmelCase__ : List[str] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) UpperCAmelCase__ : str = model.decode( decoder_input_ids[:, :-1] , __lowerCamelCase , decoder_attention_mask=__lowerCamelCase , past_key_values=__lowerCamelCase , decoder_position_ids=__lowerCamelCase , ) UpperCAmelCase__ : List[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""") UpperCAmelCase__ : Union[str, Any] = model.decode( decoder_input_ids[:, -1:] , __lowerCamelCase , decoder_attention_mask=__lowerCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__lowerCamelCase , ) UpperCAmelCase__ : List[str] = model.decode(__lowerCamelCase , __lowerCamelCase) UpperCAmelCase__ : Optional[int] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''') def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase): UpperCAmelCase__ : Union[str, Any] = 20 UpperCAmelCase__ : Union[str, Any] = model_class_name(__lowerCamelCase) UpperCAmelCase__ : str = model.encode(inputs_dict["""input_ids"""]) UpperCAmelCase__ : str = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) UpperCAmelCase__ : Optional[int] = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])), ] , axis=-1 , ) UpperCAmelCase__ : Tuple = model.init_cache(decoder_input_ids.shape[0] , __lowerCamelCase , __lowerCamelCase) UpperCAmelCase__ : Optional[Any] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) UpperCAmelCase__ : Optional[Any] = model.decode( decoder_input_ids[:, :-1] , __lowerCamelCase , decoder_attention_mask=__lowerCamelCase , past_key_values=__lowerCamelCase , decoder_position_ids=__lowerCamelCase , ) UpperCAmelCase__ : Union[str, Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""") UpperCAmelCase__ : List[str] = model.decode( decoder_input_ids[:, -1:] , __lowerCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__lowerCamelCase , decoder_position_ids=__lowerCamelCase , ) UpperCAmelCase__ : Optional[int] = model.decode(__lowerCamelCase , __lowerCamelCase , decoder_attention_mask=__lowerCamelCase) UpperCAmelCase__ : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''') def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , ): if attention_mask is None: UpperCAmelCase__ : Tuple = np.not_equal(UpperCamelCase__ , config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: UpperCAmelCase__ : List[Any] = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class 
_snake_case ( a__ , unittest.TestCase ): lowerCAmelCase :List[str] = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) lowerCAmelCase :Any = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () lowerCAmelCase :List[str] = True lowerCAmelCase :Tuple = False lowerCAmelCase :Optional[int] = False lowerCAmelCase :Optional[Any] = False def snake_case__ ( self): UpperCAmelCase__ : List[str] = FlaxPegasusModelTester(self) UpperCAmelCase__ : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase) def snake_case__ ( self): self.config_tester.run_common_tests() def snake_case__ ( self): UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase) def snake_case__ ( self): UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase) def snake_case__ ( self): UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): UpperCAmelCase__ : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase) UpperCAmelCase__ : Union[str, Any] = model_class(__lowerCamelCase) @jax.jit def encode_jitted(_lowerCamelCase , _lowerCamelCase=None , **_lowerCamelCase): return model.encode(input_ids=__lowerCamelCase , attention_mask=__lowerCamelCase) with self.subTest("""JIT Enabled"""): UpperCAmelCase__ : Any = encode_jitted(**__lowerCamelCase).to_tuple() with self.subTest("""JIT Disabled"""): with jax.disable_jit(): UpperCAmelCase__ : Union[str, Any] = encode_jitted(**__lowerCamelCase).to_tuple() self.assertEqual(len(__lowerCamelCase) , len(__lowerCamelCase)) for jitted_output, output in zip(__lowerCamelCase , __lowerCamelCase): self.assertEqual(jitted_output.shape , output.shape) def snake_case__ ( self): UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): UpperCAmelCase__ : int = model_class(__lowerCamelCase) UpperCAmelCase__ : Dict = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""]) UpperCAmelCase__ : Union[str, Any] = { "decoder_input_ids": inputs_dict["decoder_input_ids"], "decoder_attention_mask": inputs_dict["decoder_attention_mask"], "encoder_outputs": encoder_outputs, } @jax.jit def decode_jitted(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase): return model.decode( decoder_input_ids=__lowerCamelCase , decoder_attention_mask=__lowerCamelCase , encoder_outputs=__lowerCamelCase , ) with self.subTest("""JIT Enabled"""): UpperCAmelCase__ : List[str] = decode_jitted(**__lowerCamelCase).to_tuple() with self.subTest("""JIT Disabled"""): with jax.disable_jit(): UpperCAmelCase__ : Optional[int] = decode_jitted(**__lowerCamelCase).to_tuple() self.assertEqual(len(__lowerCamelCase) , len(__lowerCamelCase)) for jitted_output, output in zip(__lowerCamelCase , __lowerCamelCase): self.assertEqual(jitted_output.shape , output.shape) @slow def snake_case__ ( self): for model_class_name in self.all_model_classes: UpperCAmelCase__ : Tuple = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=__lowerCamelCase) 
UpperCAmelCase__ : Union[str, Any] = np.ones((1, 1)) UpperCAmelCase__ : Optional[int] = model(__lowerCamelCase) self.assertIsNotNone(__lowerCamelCase) @slow def snake_case__ ( self): UpperCAmelCase__ : Optional[Any] = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""") UpperCAmelCase__ : str = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""") UpperCAmelCase__ : Any = [ " PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.", " The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ", ] UpperCAmelCase__ : List[Any] = [ "California's largest electricity provider has turned off power to hundreds of thousands of customers.", "Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.", ] UpperCAmelCase__ : Any = tokenizer(__lowerCamelCase , return_tensors="""np""" , truncation=__lowerCamelCase , max_length=512 , padding=__lowerCamelCase) UpperCAmelCase__ : List[Any] = model.generate(**__lowerCamelCase , num_beams=2).sequences UpperCAmelCase__ : Optional[Any] = tokenizer.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase) assert tgt_text == decoded
code_codestyle: 163
import warnings

from ...utils import logging
from .image_processing_beit import BeitImageProcessor

lowerCAmelCase__ = logging.get_logger(__name__)


class lowerCAmelCase__(a):
    '''simple docstring'''

    def __init__(self, *__lowerCamelCase, **__lowerCamelCase) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            __lowerCamelCase,
        )
        super().__init__(*__lowerCamelCase, **__lowerCamelCase)
style_context_codestyle: 11
label: 0
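The Flax Pegasus test sample in this row builds default attention masks from the pad token when none are supplied. A NumPy-only sketch of that mask construction (the ids below are illustrative, not taken from the row):

```python
import numpy as np

def build_masks(input_ids, decoder_input_ids, pad_token_id):
    # Encoder mask: 1 wherever the token is not padding. Decoder mask: same,
    # except the first position is always kept, as in the sample above.
    attention_mask = np.not_equal(input_ids, pad_token_id).astype(np.int8)
    decoder_attention_mask = np.concatenate(
        [
            np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
            np.not_equal(decoder_input_ids[:, 1:], pad_token_id).astype(np.int8),
        ],
        axis=-1,
    )
    return attention_mask, decoder_attention_mask

masks = build_masks(np.array([[5, 7, 0, 0]]), np.array([[0, 9, 0]]), pad_token_id=0)
print(masks[0])  # [[1 1 0 0]]
print(masks[1])  # [[1 1 0]]
```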
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A : int = logging.get_logger(__name__) __A : List[Any] = { '''facebook/deit-base-distilled-patch16-224''': ( '''https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json''' ), # See all DeiT models at https://huggingface.co/models?filter=deit } class _UpperCAmelCase ( _A ): SCREAMING_SNAKE_CASE_ : List[Any] = "deit" def __init__( self : Dict , A : str=7_68 , A : str=12 , A : List[Any]=12 , A : List[str]=30_72 , A : List[str]="gelu" , A : Optional[int]=0.0 , A : Optional[Any]=0.0 , A : List[Any]=0.02 , A : Optional[Any]=1e-12 , A : Any=2_24 , A : Tuple=16 , A : List[str]=3 , A : Tuple=True , A : Union[str, Any]=16 , **A : str , ) -> List[Any]: super().__init__(**__lowerCamelCase ) lowercase_ : Dict = hidden_size lowercase_ : Tuple = num_hidden_layers lowercase_ : List[str] = num_attention_heads lowercase_ : int = intermediate_size lowercase_ : Optional[int] = hidden_act lowercase_ : List[str] = hidden_dropout_prob lowercase_ : Tuple = attention_probs_dropout_prob lowercase_ : int = initializer_range lowercase_ : Any = layer_norm_eps lowercase_ : Tuple = image_size lowercase_ : Union[str, Any] = patch_size lowercase_ : str = num_channels lowercase_ : Dict = qkv_bias lowercase_ : Dict = encoder_stride class _UpperCAmelCase ( _A ): SCREAMING_SNAKE_CASE_ : int = version.parse("1.11" ) @property def A ( self : Dict ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def A ( self : List[str] ) -> float: return 1e-4
code_codestyle: 33
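The DeiT configuration above uses a 224-pixel image with 16-pixel patches; the resulting patch count follows directly from those two numbers (class and distillation tokens are not counted in this quick check):

```python
image_size, patch_size = 224, 16   # values from the config sample above
num_patches = (image_size // patch_size) ** 2
print(num_patches)  # 196 patches, i.e. a 14 x 14 grid
```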
import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class lowerCAmelCase__ : '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=3_2 , __lowerCamelCase=2 , __lowerCamelCase=3 , __lowerCamelCase=1_6 , __lowerCamelCase=[1, 2, 1] , __lowerCamelCase=[2, 2, 4] , __lowerCamelCase=2 , __lowerCamelCase=2.0 , __lowerCamelCase=True , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.1 , __lowerCamelCase="gelu" , __lowerCamelCase=False , __lowerCamelCase=True , __lowerCamelCase=0.0_2 , __lowerCamelCase=1e-5 , __lowerCamelCase=True , __lowerCamelCase=None , __lowerCamelCase=True , __lowerCamelCase=1_0 , __lowerCamelCase=8 , __lowerCamelCase=["stage1", "stage2", "stage3"] , __lowerCamelCase=[1, 2, 3] , ) -> Optional[Any]: _A : int = parent _A : Optional[Any] = batch_size _A : str = image_size _A : Tuple = patch_size _A : Tuple = num_channels _A : Optional[int] = embed_dim _A : Dict = depths _A : Any = num_heads _A : Any = window_size _A : int = mlp_ratio _A : Any = qkv_bias _A : Union[str, Any] = hidden_dropout_prob _A : Optional[Any] = attention_probs_dropout_prob _A : Dict = drop_path_rate _A : List[Any] = hidden_act _A : Any = use_absolute_embeddings _A : Optional[int] = patch_norm _A : Tuple = layer_norm_eps _A : List[str] = initializer_range _A : Optional[int] = is_training _A : Optional[Any] = scope _A : Optional[int] = use_labels _A : Dict = type_sequence_label_size _A : str = encoder_stride _A : Optional[int] = out_features _A : Optional[int] = out_indices def _lowerCamelCase ( self) -> Union[str, Any]: _A : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) _A : Optional[Any] = None if self.use_labels: _A : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size) _A : Optional[int] = self.get_config() return config, pixel_values, labels def _lowerCamelCase ( self) -> Union[str, Any]: return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> List[Any]: _A : Dict = MaskFormerSwinModel(config=__lowerCamelCase) model.to(__lowerCamelCase) model.eval() _A : int = model(__lowerCamelCase) _A : Any = 
((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1)) _A : List[str] = int(config.embed_dim * 2 ** (len(config.depths) - 1)) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim)) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Dict: _A : Optional[Any] = MaskFormerSwinBackbone(config=__lowerCamelCase) model.to(__lowerCamelCase) model.eval() _A : Dict = model(__lowerCamelCase) # verify feature maps self.parent.assertEqual(len(result.feature_maps) , len(config.out_features)) self.parent.assertListEqual(list(result.feature_maps[0].shape) , [1_3, 1_6, 1_6, 1_6]) # verify channels self.parent.assertEqual(len(model.channels) , len(config.out_features)) self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4]) # verify ValueError with self.parent.assertRaises(__lowerCamelCase): _A : Union[str, Any] = ["stem"] _A : Union[str, Any] = MaskFormerSwinBackbone(config=__lowerCamelCase) def _lowerCamelCase ( self) -> Dict: _A : Any = self.prepare_config_and_inputs() _A , _A , _A : List[Any] = config_and_inputs _A : Optional[int] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase__ ( a , a , unittest.TestCase): '''simple docstring''' __SCREAMING_SNAKE_CASE = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) __SCREAMING_SNAKE_CASE = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {} __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False def _lowerCamelCase ( self) -> str: _A : Union[str, Any] = MaskFormerSwinModelTester(self) _A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , embed_dim=3_7) @require_torch_multi_gpu @unittest.skip( reason=( "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with" " `nn.DataParallel`" )) def _lowerCamelCase ( self) -> Union[str, Any]: pass def _lowerCamelCase ( self) -> int: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _lowerCamelCase ( self) -> str: return def _lowerCamelCase ( self) -> List[Any]: _A : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase) def _lowerCamelCase ( self) -> Union[str, Any]: _A : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__lowerCamelCase) @unittest.skip("Swin does not use inputs_embeds") def _lowerCamelCase ( self) -> str: pass @unittest.skip("Swin does not support feedforward chunking") def _lowerCamelCase ( self) -> List[Any]: pass def _lowerCamelCase ( self) -> Optional[int]: _A , _A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A : Union[str, Any] = model_class(__lowerCamelCase) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) _A : Dict = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear)) def _lowerCamelCase ( self) -> Any: _A , _A : List[Any] 
= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A : int = model_class(__lowerCamelCase) _A : Optional[int] = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic _A : int = [*signature.parameters.keys()] _A : Optional[int] = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase) @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions") def _lowerCamelCase ( self) -> Tuple: pass @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone") def _lowerCamelCase ( self) -> str: pass def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Optional[int]: _A : Any = model_class(__lowerCamelCase) model.to(__lowerCamelCase) model.eval() with torch.no_grad(): _A : str = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)) _A : Tuple = outputs.hidden_states _A : Any = getattr( self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths) + 1) self.assertEqual(len(__lowerCamelCase) , __lowerCamelCase) # Swin has a different seq_length _A : Optional[int] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable) else (config.patch_size, config.patch_size) ) _A : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , ) def _lowerCamelCase ( self) -> Dict: _A , _A : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _A : Any = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: _A : List[Any] = True self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _A : Optional[int] = True self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) def _lowerCamelCase ( self) -> Tuple: _A , _A : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _A : Optional[int] = 3 _A : Dict = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) _A : Optional[int] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable) else (config.patch_size, config.patch_size) ) _A : int = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) _A : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: _A : List[Any] = True self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , (padded_height, padded_width)) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _A : Union[str, Any] = True self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , (padded_height, padded_width)) @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints") def _lowerCamelCase ( self) -> List[str]: pass @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin") def _lowerCamelCase ( self) -> List[str]: 
pass @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin") def _lowerCamelCase ( self) -> str: pass def _lowerCamelCase ( self) -> Optional[Any]: _A , _A : Any = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(__lowerCamelCase): _A : Optional[int] = 0 return t def check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase={}): with torch.no_grad(): _A : Any = model(**__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase) _A : int = model(**__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase).to_tuple() def recursive_check(__lowerCamelCase , __lowerCamelCase): if isinstance(__lowerCamelCase , (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(__lowerCamelCase , __lowerCamelCase): recursive_check(__lowerCamelCase , __lowerCamelCase) elif isinstance(__lowerCamelCase , __lowerCamelCase): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values()): recursive_check(__lowerCamelCase , __lowerCamelCase) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(__lowerCamelCase) , set_nan_tensor_to_zero(__lowerCamelCase) , atol=1e-5) , msg=( "Tuple and dict output are not equal. Difference:" F" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:" F" {torch.isnan(__lowerCamelCase).any()} and `inf`: {torch.isinf(__lowerCamelCase)}. Dict has" F" `nan`: {torch.isnan(__lowerCamelCase).any()} and `inf`: {torch.isinf(__lowerCamelCase)}." ) , ) recursive_check(__lowerCamelCase , __lowerCamelCase) for model_class in self.all_model_classes: _A : List[Any] = model_class(__lowerCamelCase) model.to(__lowerCamelCase) model.eval() _A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase) _A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase) _A : Any = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase) _A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase) _A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase) _A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True}) _A : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase) _A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True}) @require_torch class lowerCAmelCase__ ( unittest.TestCase , a): '''simple docstring''' __SCREAMING_SNAKE_CASE = (MaskFormerSwinBackbone,) if is_torch_available() else () __SCREAMING_SNAKE_CASE = MaskFormerSwinConfig def _lowerCamelCase ( self) -> Optional[Any]: _A : Tuple = MaskFormerSwinModelTester(self) def _lowerCamelCase ( self) -> Optional[Any]: _A , _A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() _A : Union[str, Any] = inputs_dict["pixel_values"].shape[0] for backbone_class in self.all_model_classes: _A : Optional[Any] = backbone_class(__lowerCamelCase) backbone.to(__lowerCamelCase) backbone.eval() _A : List[Any] = 
backbone(**__lowerCamelCase) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , __lowerCamelCase) self.assertTrue(len(outputs.feature_maps) == len(backbone.channels)) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels)) self.assertIsNone(outputs.hidden_states) self.assertIsNone(outputs.attentions) # Test output_hidden_states=True _A : List[str] = backbone(**__lowerCamelCase , output_hidden_states=__lowerCamelCase) self.assertIsNotNone(outputs.hidden_states) self.assertTrue(len(outputs.hidden_states) , len(backbone.stage_names)) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) _A , _A , _A : List[str] = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels)) # Test output_attentions=True if self.has_attentions: _A : int = backbone(**__lowerCamelCase , output_attentions=__lowerCamelCase) self.assertIsNotNone(outputs.attentions)
11
0
"""simple docstring""" import argparse from copy import deepcopy import numpy as np from datasets import ClassLabel, DatasetDict, load_dataset from evaluate import load from transformers import ( AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, Trainer, TrainerCallback, TrainingArguments, set_seed, ) def _lowerCamelCase ( ): '''simple docstring''' __lowerCAmelCase = argparse.ArgumentParser() parser.add_argument("--model_ckpt" , type=UpperCamelCase__ , default="microsoft/unixcoder-base-nine" ) parser.add_argument("--num_epochs" , type=UpperCamelCase__ , default=5 ) parser.add_argument("--batch_size" , type=UpperCamelCase__ , default=6 ) parser.add_argument("--gradient_accumulation_steps" , type=UpperCamelCase__ , default=1 ) parser.add_argument("--freeze" , type=UpperCamelCase__ , default=UpperCamelCase__ ) parser.add_argument("--learning_rate" , type=UpperCamelCase__ , default=5e-4 ) parser.add_argument("--seed" , type=UpperCamelCase__ , default=0 ) parser.add_argument("--lr_scheduler_type" , type=UpperCamelCase__ , default="cosine" ) parser.add_argument("--num_warmup_steps" , type=UpperCamelCase__ , default=10 ) parser.add_argument("--weight_decay" , type=UpperCamelCase__ , default=0.01 ) parser.add_argument("--output_dir" , type=UpperCamelCase__ , default="./results" ) return parser.parse_args() A : Dict = load("accuracy") def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = eval_pred __lowerCAmelCase = np.argmax(UpperCamelCase__ , axis=1 ) return metric.compute(predictions=UpperCamelCase__ , references=UpperCamelCase__ ) class _UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' def __init__( self , __a ): super().__init__() __lowerCAmelCase = trainer def snake_case ( self , __a , __a , __a , **__a ): if control.should_evaluate: __lowerCAmelCase = deepcopy(__lowerCamelCase ) self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="train" ) return control_copy def _lowerCamelCase ( ): '''simple docstring''' __lowerCAmelCase = get_args() set_seed(args.seed ) __lowerCAmelCase = load_dataset("codeparrot/codecomplex" , split="train" ) __lowerCAmelCase = dataset.train_test_split(test_size=0.2 ) __lowerCAmelCase = train_test["test"].train_test_split(test_size=0.5 ) __lowerCAmelCase = DatasetDict( { "train": train_test["train"], "test": test_validation["train"], "valid": test_validation["test"], } ) print("Loading tokenizer and model" ) __lowerCAmelCase = AutoTokenizer.from_pretrained(args.model_ckpt ) __lowerCAmelCase = tokenizer.eos_token __lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 ) __lowerCAmelCase = model.config.eos_token_id if args.freeze: for param in model.roberta.parameters(): __lowerCAmelCase = False __lowerCAmelCase = ClassLabel(num_classes=7 , names=list(set(train_test_validation["train"]["complexity"] ) ) ) def tokenize(_UpperCamelCase ): __lowerCAmelCase = tokenizer(example["src"] , truncation=UpperCamelCase__ , max_length=1024 ) __lowerCAmelCase = labels.straint(example["complexity"] ) return { "input_ids": inputs["input_ids"], "attention_mask": inputs["attention_mask"], "label": label, } __lowerCAmelCase = train_test_validation.map( UpperCamelCase__ , batched=UpperCamelCase__ , remove_columns=train_test_validation["train"].column_names , ) __lowerCAmelCase = DataCollatorWithPadding(tokenizer=UpperCamelCase__ ) __lowerCAmelCase = TrainingArguments( output_dir=args.output_dir , learning_rate=args.learning_rate , 
lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy="epoch" , save_strategy="epoch" , logging_strategy="epoch" , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model="accuracy" , run_name="complexity-java" , report_to="wandb" , ) __lowerCAmelCase = Trainer( model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=tokenized_datasets["train"] , eval_dataset=tokenized_datasets["valid"] , tokenizer=UpperCamelCase__ , data_collator=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , ) print("Training..." ) trainer.add_callback(CustomCallback(UpperCamelCase__ ) ) trainer.train() if __name__ == "__main__": main()
57
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available, ) lowerCAmelCase__ = { 'configuration_speecht5': [ 'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP', 'SpeechT5Config', 'SpeechT5HifiGanConfig', ], 'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'], 'processing_speecht5': ['SpeechT5Processor'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['SpeechT5Tokenizer'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ 'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST', 'SpeechT5ForSpeechToText', 'SpeechT5ForSpeechToSpeech', 'SpeechT5ForTextToSpeech', 'SpeechT5Model', 'SpeechT5PreTrainedModel', 'SpeechT5HifiGan', ] if TYPE_CHECKING: from .configuration_speechta import ( SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP, SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, SpeechTaConfig, SpeechTaHifiGanConfig, ) from .feature_extraction_speechta import SpeechTaFeatureExtractor from .processing_speechta import SpeechTaProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speechta import SpeechTaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speechta import ( SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaModel, SpeechTaPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
11
0
"""simple docstring""" import tempfile import unittest from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class __a (unittest.TestCase): '''simple docstring''' def _a ( self ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = "hf-internal-testing/tiny-random-t5" SCREAMING_SNAKE_CASE__ : str = AutoTokenizer.from_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE__ : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE__ : str = tokenizer("""This is me""" , return_tensors="""pt""" ) SCREAMING_SNAKE_CASE__ : str = model.to_bettertransformer() self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = model.generate(**__lowerCamelCase ) SCREAMING_SNAKE_CASE__ : Dict = model.reverse_bettertransformer() self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE__ : int = AutoModelForSeqaSeqLM.from_pretrained(__lowerCamelCase ) self.assertFalse( any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) ) SCREAMING_SNAKE_CASE__ : Optional[int] = model_reloaded.generate(**__lowerCamelCase ) self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase ) ) def _a ( self ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = "hf-internal-testing/tiny-random-t5" SCREAMING_SNAKE_CASE__ : Any = AutoModelForSeqaSeqLM.from_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE__ : Tuple = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(__lowerCamelCase ): model.save_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE__ : int = model.reverse_bettertransformer() model.save_pretrained(__lowerCamelCase )
132
import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def _UpperCAmelCase (UpperCamelCase__ : Union[str, Any] ): _A , _A : Any = image.size _A , _A : str = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 _A : List[str] = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) _A : Any = np.array(UpperCamelCase__ ).astype(np.floataa ) / 2_55.0 _A : Optional[Any] = image[None].transpose(0 , 3 , 1 , 2 ) _A : Union[str, Any] = torch.from_numpy(UpperCamelCase__ ) return 2.0 * image - 1.0 class lowerCAmelCase__ ( a): '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> Optional[int]: super().__init__() self.register_modules(vqvae=__lowerCamelCase , unet=__lowerCamelCase , scheduler=__lowerCamelCase) @torch.no_grad() def __call__( self , __lowerCamelCase = None , __lowerCamelCase = 1 , __lowerCamelCase = 1_0_0 , __lowerCamelCase = 0.0 , __lowerCamelCase = None , __lowerCamelCase = "pil" , __lowerCamelCase = True , ) -> Union[Tuple, ImagePipelineOutput]: if isinstance(__lowerCamelCase , PIL.Image.Image): _A : Tuple = 1 elif isinstance(__lowerCamelCase , torch.Tensor): _A : Union[str, Any] = image.shape[0] else: raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(__lowerCamelCase)}") if isinstance(__lowerCamelCase , PIL.Image.Image): _A : Union[str, Any] = preprocess(__lowerCamelCase) _A , _A : Union[str, Any] = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image _A : Optional[Any] = (batch_size, self.unet.config.in_channels // 2, height, width) _A : str = next(self.unet.parameters()).dtype _A : Union[str, Any] = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=self.device , dtype=__lowerCamelCase) _A : List[Any] = image.to(device=self.device , dtype=__lowerCamelCase) # set timesteps and move to the correct device self.scheduler.set_timesteps(__lowerCamelCase , device=self.device) _A : Any = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler _A : List[str] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] _A : str = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) _A : Optional[int] = {} if accepts_eta: _A : List[Any] = eta for t in self.progress_bar(__lowerCamelCase): # concat latents and low resolution image in the channel dimension. 
_A : List[Any] = torch.cat([latents, image] , dim=1) _A : str = self.scheduler.scale_model_input(__lowerCamelCase , __lowerCamelCase) # predict the noise residual _A : Any = self.unet(__lowerCamelCase , __lowerCamelCase).sample # compute the previous noisy sample x_t -> x_t-1 _A : Optional[int] = self.scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase).prev_sample # decode the image latents with the VQVAE _A : Union[str, Any] = self.vqvae.decode(__lowerCamelCase).sample _A : Dict = torch.clamp(__lowerCamelCase , -1.0 , 1.0) _A : Tuple = image / 2 + 0.5 _A : int = image.cpu().permute(0 , 2 , 3 , 1).numpy() if output_type == "pil": _A : Optional[int] = self.numpy_to_pil(__lowerCamelCase) if not return_dict: return (image,) return ImagePipelineOutput(images=__lowerCamelCase)
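The pipeline defined above appears to be a renamed copy of diffusers' latent-diffusion super-resolution pipeline (`LDMSuperResolutionPipeline`: VQ-VAE + UNet, with the low-resolution image concatenated to the latents at every step). Assuming that correspondence, and assuming the public "CompVis/ldm-super-resolution-4x-openimages" checkpoint, a minimal usage sketch might look like the following; the grey placeholder image only stands in for a real low-resolution input.

import torch
from PIL import Image
from diffusers import LDMSuperResolutionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the latent-diffusion super-resolution pipeline (vqvae + unet + scheduler).
pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
pipe = pipe.to(device)

# Placeholder low-resolution input; any RGB PIL image (or a preprocessed tensor) works.
low_res_img = Image.new("RGB", (128, 128), color=(128, 128, 128))

# Mirrors the __call__ signature above: image, num_inference_steps, eta, output_type="pil".
upscaled = pipe(low_res_img, num_inference_steps=100, eta=1.0).images[0]
upscaled.save("upscaled_sample.png")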
11
0
import unittest import torch from diffusers import VQModel from diffusers.utils import floats_tensor, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class __lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): """simple docstring""" A__ : str = VQModel A__ : Union[str, Any] = '''sample''' @property def snake_case_ ( self : Union[str, Any] , _snake_case : Optional[int]=(32, 32) ): __lowercase : Optional[int] = 4 __lowercase : Tuple = 3 __lowercase : List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCamelCase ) return {"sample": image} @property def snake_case_ ( self : str ): return (3, 32, 32) @property def snake_case_ ( self : Union[str, Any] ): return (3, 32, 32) def snake_case_ ( self : Optional[Any] ): __lowercase : List[Any] = { "block_out_channels": [32, 64], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 3, } __lowercase : int = self.dummy_input return init_dict, inputs_dict def snake_case_ ( self : Optional[int] ): pass def snake_case_ ( self : Tuple ): pass def snake_case_ ( self : int ): __lowercase : List[Any] = VQModel.from_pretrained('''fusing/vqgan-dummy''' , output_loading_info=__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 ) model.to(__lowerCamelCase ) __lowercase : str = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def snake_case_ ( self : int ): __lowercase : Optional[Any] = VQModel.from_pretrained('''fusing/vqgan-dummy''' ) model.to(__lowerCamelCase ).eval() torch.manual_seed(0 ) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0 ) __lowercase : Tuple = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size ) __lowercase : Optional[int] = image.to(__lowerCamelCase ) with torch.no_grad(): __lowercase : List[str] = model(__lowerCamelCase ).sample __lowercase : int = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off __lowercase : Optional[Any] = torch.tensor([-0.01_53, -0.40_44, -0.18_80, -0.51_61, -0.24_18, -0.40_72, -0.16_12, -0.06_33, -0.01_43] ) # fmt: on self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1E-3 ) )
156
import unittest import torch from diffusers import VQModel from diffusers.utils import floats_tensor, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class lowerCAmelCase__ ( a , a , unittest.TestCase): '''simple docstring''' __SCREAMING_SNAKE_CASE = VQModel __SCREAMING_SNAKE_CASE = "sample" @property def _lowerCamelCase ( self , __lowerCamelCase=(3_2, 3_2)) -> Optional[Any]: _A : Optional[int] = 4 _A : Tuple = 3 _A : List[Any] = floats_tensor((batch_size, num_channels) + sizes).to(__lowerCamelCase) return {"sample": image} @property def _lowerCamelCase ( self) -> int: return (3, 3_2, 3_2) @property def _lowerCamelCase ( self) -> List[Any]: return (3, 3_2, 3_2) def _lowerCamelCase ( self) -> Union[str, Any]: _A : List[Any] = { "block_out_channels": [3_2, 6_4], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 3, } _A : int = self.dummy_input return init_dict, inputs_dict def _lowerCamelCase ( self) -> Union[str, Any]: pass def _lowerCamelCase ( self) -> Any: pass def _lowerCamelCase ( self) -> Any: _A , _A : List[Any] = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=__lowerCamelCase) self.assertIsNotNone(__lowerCamelCase) self.assertEqual(len(loading_info["missing_keys"]) , 0) model.to(__lowerCamelCase) _A : str = model(**self.dummy_input) assert image is not None, "Make sure output is not None" def _lowerCamelCase ( self) -> Union[str, Any]: _A : Optional[Any] = VQModel.from_pretrained("fusing/vqgan-dummy") model.to(__lowerCamelCase).eval() torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) _A : Tuple = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size) _A : Optional[int] = image.to(__lowerCamelCase) with torch.no_grad(): _A : List[str] = model(__lowerCamelCase).sample _A : int = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off _A : Optional[Any] = torch.tensor([-0.0_1_5_3, -0.4_0_4_4, -0.1_8_8_0, -0.5_1_6_1, -0.2_4_1_8, -0.4_0_7_2, -0.1_6_1_2, -0.0_6_3_3, -0.0_1_4_3]) # fmt: on self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3))
11
0
"""simple docstring""" # HF Trainer benchmarking tool # # This tool can be used to run and compare multiple dimensions of the HF Trainers args. # # It then prints a report once in github format with all the information that needs to be shared # with others and second time in a console-friendly format, so it's easier to use for tuning things up. # # The main idea is: # # ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \ # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \ # --target-metric-key train_samples_per_second # # The variations can be any command line argument that you want to compare and not just dtype as in # the example. # # --variations allows you to compare variations in multiple dimensions. # # as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6 # times adding one of: # # 1. --tf32 0 --fp16 0 # 2. --tf32 0 --fp16 1 # 3. --tf32 0 --bf16 1 # 4. --tf32 1 --fp16 0 # 5. --tf32 1 --fp16 1 # 6. --tf32 1 --bf16 1 # # and print the results. This is just a cartesian product - and more than 2 dimensions can be used. # # If you want to rely on defaults, this: # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' # is identical to this: # --variations '--tf32 0|--tf32 1' '|--fp16|--bf16' # # the leading empty variation in the 2nd dimension is a valid variation. # # So here we get the following 6 variations: # # 1. --tf32 0 # 2. --tf32 0 --fp16 # 3. --tf32 0 --bf16 # 4. --tf32 1 # 5. --tf32 1 --fp16 # 6. --tf32 1 --bf16 # # In this particular case we don't know what the default tf32 setting is as it's normally # pytorch-version dependent). That's why it's best to do an explicit setting of each variation: # `--tf32 0|--tf32 1` # # Here is a full example of a train: # # CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \ # --base-cmd \ # ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \ # --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \ # --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \ # --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \ # --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \ # --source_prefix "translate English to Romanian: " --warmup_steps 50 \ # --max_train_samples 20000 --dataloader_num_workers 2 ' \ # --target-metric-key train_samples_per_second --repeat-times 1 --variations \ # '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \ # --repeat-times 1 --base-variation '--tf32 0' # # and here is a possible output: # # # | Variation | Train | Diff | Train | # | | samples | % | loss | # | | per | | | # | | second | | | # |:----------------|----------:|-------:|--------:| # | --tf32 0 | 285.11 | 0 | 2.51 | # | --tf32 1 | 342.09 | 20 | 2.51 | # | --fp16 --tf32 0 | 423.49 | 49 | 2.51 | # | --fp16 --tf32 1 | 423.13 | 48 | 2.51 | # | --bf16 --tf32 0 | 416.80 | 46 | 2.52 | # | --bf16 --tf32 1 | 415.87 | 46 | 2.52 | # # # So you can quickly compare the different outcomes. # # Typically running each experiment once is enough, but if the environment is unstable you can # re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results. 
# # By default it'll use the lowest result as the base line to use as 100% and then compare the rest to # it as can be seen from the table above, but you can also specify which combination is the one to use as # the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0' # # --target-metric-key is there to tell the program which metrics to compare - the different metric keys are # inside output_dir/all_results.json. e.g., to measure eval performance instead of train use: # --target-metric-key eval_samples_per_second # but of course you will need to adjust the --base-cmd value in the example to perform evaluation as # well (as currently it doesn't) # import argparse import datetime import io import itertools import json import math import os import platform import re import shlex import subprocess import sys from pathlib import Path from statistics import fmean import pandas as pd import torch from tqdm import tqdm import transformers __A = float("nan") class UpperCAmelCase : """simple docstring""" def __init__( self , _UpperCAmelCase ): lowercase__: List[Any] = sys.stdout lowercase__: str = open(__lowerCamelCase , '''a''' ) def __getattr__( self , _UpperCAmelCase ): return getattr(self.stdout , __lowerCamelCase ) def _snake_case ( self , _UpperCAmelCase ): self.stdout.write(__lowerCamelCase ) # strip tqdm codes self.file.write(re.sub(r'''^.*\r''' , '''''' , __lowerCamelCase , 0 , re.M ) ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase=8_0 , __UpperCAmelCase=False ) -> Dict: lowercase__: Tuple = [] # deal with critical env vars lowercase__: Dict = ["CUDA_VISIBLE_DEVICES"] for key in env_keys: lowercase__: Optional[int] = os.environ.get(UpperCamelCase__ , UpperCamelCase__ ) if val is not None: cmd.append(F"""{key}={val}""" ) # python executable (not always needed if the script is executable) lowercase__: Optional[int] = sys.executable if full_python_path else sys.executable.split('''/''' )[-1] cmd.append(UpperCamelCase__ ) # now the normal args cmd += list(map(shlex.quote , sys.argv ) ) # split up into up to MAX_WIDTH lines with shell multi-line escapes lowercase__: Tuple = [] lowercase__: Dict = "" while len(UpperCamelCase__ ) > 0: current_line += F"""{cmd.pop(0 )} """ if len(UpperCamelCase__ ) == 0 or len(UpperCamelCase__ ) + len(cmd[0] ) + 1 > max_width - 1: lines.append(UpperCamelCase__ ) lowercase__: Union[str, Any] = "" return "\\\n".join(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]: # unwrap multi-line input lowercase__: Union[str, Any] = re.sub(R'''[\\\n]+''' , ''' ''' , args.base_cmd ) # remove --output_dir if any and set our own lowercase__: int = re.sub('''--output_dir\s+[^\s]+''' , '''''' , args.base_cmd ) args.base_cmd += F""" --output_dir {output_dir}""" # ensure we have --overwrite_output_dir lowercase__: int = re.sub('''--overwrite_output_dir\s+''' , '''''' , args.base_cmd ) args.base_cmd += " --overwrite_output_dir" return [sys.executable] + shlex.split(args.base_cmd ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]: # Enable to debug everything but the run itself, to do it fast and see the progress. 
# This is useful for debugging the output formatting quickly - we can remove it later once # everybody is happy with the output if 0: import random from time import sleep sleep(0 ) return dict( {k: random.uniform(0 , 1_0_0 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 1_0.3_1, 1_0_0.2, 5_5.6_6_6_6, 2_2_2.2_2_2_2_2_2_2_2] )} , ) lowercase__: Dict = subprocess.run(UpperCamelCase__ , capture_output=UpperCamelCase__ , text=UpperCamelCase__ ) if verbose: print('''STDOUT''' , result.stdout ) print('''STDERR''' , result.stderr ) # save the streams lowercase__: Tuple = variation.replace(''' ''' , '''-''' ) with open(Path(UpperCamelCase__ ) / F"""log.{prefix}.stdout.txt""" , '''w''' ) as f: f.write(result.stdout ) with open(Path(UpperCamelCase__ ) / F"""log.{prefix}.stderr.txt""" , '''w''' ) as f: f.write(result.stderr ) if result.returncode != 0: if verbose: print('''failed''' ) return {target_metric_key: nan} with io.open(F"""{output_dir}/all_results.json""" , '''r''' , encoding='''utf-8''' ) as f: lowercase__: List[str] = json.load(UpperCamelCase__ ) # filter out just the keys we want return {k: v for k, v in metrics.items() if k in metric_keys} def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Any: lowercase__: Union[str, Any] = [] lowercase__: Optional[int] = [] lowercase__: Any = F"""{id}: {variation:<{longest_variation_len}}""" lowercase__: Dict = F"""{preamble}: """ lowercase__: Union[str, Any] = set(report_metric_keys + [target_metric_key] ) for i in tqdm(range(UpperCamelCase__ ) , desc=UpperCamelCase__ , leave=UpperCamelCase__ ): lowercase__: Optional[Any] = process_run_single( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) lowercase__: Optional[Any] = single_run_metrics[target_metric_key] if not math.isnan(UpperCamelCase__ ): metrics.append(UpperCamelCase__ ) results.append(UpperCamelCase__ ) outcome += "✓" else: outcome += "✘" lowercase__: str = F"""\33[2K\r{outcome}""" if len(UpperCamelCase__ ) > 0: lowercase__: List[str] = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()} lowercase__: Any = round(mean_metrics[target_metric_key] , 2 ) lowercase__: Tuple = F"""{outcome} {mean_target}""" if len(UpperCamelCase__ ) > 1: results_str += F""" {tuple(round(UpperCamelCase__ , 2 ) for x in results )}""" print(UpperCamelCase__ ) lowercase__: Optional[int] = variation return mean_metrics else: print(UpperCamelCase__ ) return {variation_key: variation, target_metric_key: nan} def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]: lowercase__: int = torch.cuda.get_device_properties(torch.device('''cuda''' ) ) return F"""\nDatetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**3_0:0.2f}GB\n""" def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict: lowercase__: Any = pd.DataFrame(UpperCamelCase__ ) lowercase__: List[str] = "variation" lowercase__: List[Any] = "diff_%" lowercase__: int = nan if base_variation is not None and len(df[df[variation_key] == base_variation] ): # this may still 
return nan lowercase__: int = df.loc[df[variation_key] == base_variation][target_metric_key].item() if math.isnan(UpperCamelCase__ ): # as a fallback, use the minimal value as the sentinel lowercase__: List[str] = df.loc[df[target_metric_key] != nan][target_metric_key].min() # create diff column if possible if not math.isnan(UpperCamelCase__ ): lowercase__: Optional[Any] = df.apply( lambda __UpperCAmelCase : round(1_0_0 * (r[target_metric_key] - sentinel_value) / sentinel_value ) if not math.isnan(r[target_metric_key] ) else 0 , axis='''columns''' , ) # re-order columns lowercase__: Union[str, Any] = [variation_key, target_metric_key, diff_key, *report_metric_keys] lowercase__: Any = df.reindex(UpperCamelCase__ , axis='''columns''' ) # reorder cols # capitalize lowercase__: Tuple = df.rename(str.capitalize , axis='''columns''' ) # make the cols as narrow as possible lowercase__: List[str] = df.rename(lambda __UpperCAmelCase : c.replace('''_''' , '''<br>''' ) , axis='''columns''' ) lowercase__: Union[str, Any] = df.rename(lambda __UpperCAmelCase : c.replace('''_''' , '''\n''' ) , axis='''columns''' ) lowercase__: Optional[int] = ["", "Copy between the cut-here-lines and paste as is to github or a forum"] report += ["----------8<-----------------8<--------"] report += ["*** Results:", df_github.to_markdown(index=UpperCamelCase__ , floatfmt='''.2f''' )] report += ["```"] report += ["*** Setup:", get_versions()] report += ["*** The benchmark command line was:", get_original_command()] report += ["```"] report += ["----------8<-----------------8<--------"] report += ["*** Results (console):", df_console.to_markdown(index=UpperCamelCase__ , floatfmt='''.2f''' )] print('''\n\n'''.join(UpperCamelCase__ ) ) def SCREAMING_SNAKE_CASE__ ( ) -> List[str]: lowercase__: int = argparse.ArgumentParser() parser.add_argument( '''--base-cmd''' , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help='''Base cmd''' , ) parser.add_argument( '''--variations''' , default=UpperCamelCase__ , type=UpperCamelCase__ , nargs='''+''' , required=UpperCamelCase__ , help='''Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'''' , ) parser.add_argument( '''--base-variation''' , default=UpperCamelCase__ , type=UpperCamelCase__ , help='''Baseline variation to compare to. if None the minimal target value will be used to compare against''' , ) parser.add_argument( '''--target-metric-key''' , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help='''Target metric key in output_dir/all_results.json, e.g., train_samples_per_second''' , ) parser.add_argument( '''--report-metric-keys''' , default='''''' , type=UpperCamelCase__ , help='''Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. 
Use a single argument e.g., \'train_loss train_samples''' , ) parser.add_argument( '''--repeat-times''' , default=1 , type=UpperCamelCase__ , help='''How many times to re-run each variation - an average will be reported''' , ) parser.add_argument( '''--output_dir''' , default='''output_benchmark''' , type=UpperCamelCase__ , help='''The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked''' , ) parser.add_argument( '''--verbose''' , default=UpperCamelCase__ , action='''store_true''' , help='''Whether to show the outputs of each run or just the benchmark progress''' , ) lowercase__: int = parser.parse_args() lowercase__: Union[str, Any] = args.output_dir Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ ) lowercase__: Tuple = get_base_command(UpperCamelCase__ , UpperCamelCase__ ) # split each dimension into its --foo variations lowercase__: Dict = [list(map(str.strip , re.split(R'''\|''' , UpperCamelCase__ ) ) ) for x in args.variations] # build a cartesian product of dimensions and convert those back into cmd-line arg strings, # while stripping white space for inputs that were empty lowercase__: Union[str, Any] = list(map(str.strip , map(''' '''.join , itertools.product(*UpperCamelCase__ ) ) ) ) lowercase__: Union[str, Any] = max(len(UpperCamelCase__ ) for x in variations ) # split wanted keys lowercase__: str = args.report_metric_keys.split() # capture prints into a log file for convenience lowercase__: Optional[int] = F"""benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S' )}.txt""" print(F"""\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt""" ) print(F"""and this script's output is also piped into {report_fn}""" ) lowercase__: Tuple = Tee(UpperCamelCase__ ) print(F"""\n*** Running {len(UpperCamelCase__ )} benchmarks:""" ) print(F"""Base command: {' '.join(UpperCamelCase__ )}""" ) lowercase__: str = "variation" lowercase__: Union[str, Any] = [] for id, variation in enumerate(tqdm(UpperCamelCase__ , desc='''Total completion: ''' , leave=UpperCamelCase__ ) ): lowercase__: Dict = base_cmd + variation.split() results.append( process_run( id + 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , args.target_metric_key , UpperCamelCase__ , args.repeat_times , UpperCamelCase__ , args.verbose , ) ) process_results(UpperCamelCase__ , args.target_metric_key , UpperCamelCase__ , args.base_variation , UpperCamelCase__ ) if __name__ == "__main__": main()
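To make the cartesian-product expansion described in the header comment of this benchmarking script concrete, here is a small self-contained sketch of how two `--variations` dimensions (the strings below are just the examples from that comment) expand into six individual runs; it mirrors the `re.split` / `itertools.product` logic above without running any training.

import itertools

# Two variation dimensions, written exactly as they would be passed to --variations.
dimensions = ["--tf32 0|--tf32 1", "|--fp16|--bf16"]

# Split each dimension on "|" (an empty entry means "rely on the default") and take the
# cartesian product of the options, one option per dimension per run.
split_dims = [[part.strip() for part in dim.split("|")] for dim in dimensions]
variations = [" ".join(parts).strip() for parts in itertools.product(*split_dims)]

for v in variations:
    print(v)
# --tf32 0
# --tf32 0 --fp16
# --tf32 0 --bf16
# --tf32 1
# --tf32 1 --fp16
# --tf32 1 --bf16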
177
import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: lowerCAmelCase__ = None lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} lowerCAmelCase__ = { 'vocab_file': { 'facebook/mbart-large-en-ro': ( 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model' ), 'facebook/mbart-large-cc25': ( 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model' ), }, 'tokenizer_file': { 'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json', 'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json', }, } lowerCAmelCase__ = { 'facebook/mbart-large-en-ro': 10_24, 'facebook/mbart-large-cc25': 10_24, } # fmt: off lowerCAmelCase__ = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN'] class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"] __SCREAMING_SNAKE_CASE = MBartTokenizer __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="<s>" , __lowerCamelCase="</s>" , __lowerCamelCase="</s>" , __lowerCamelCase="<s>" , __lowerCamelCase="<unk>" , __lowerCamelCase="<pad>" , __lowerCamelCase="<mask>" , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , **__lowerCamelCase , ) -> Optional[int]: # Mask token behave like a normal word, i.e. include the space before it _A : List[str] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase) else mask_token super().__init__( vocab_file=__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , **__lowerCamelCase , ) _A : Union[str, Any] = vocab_file _A : int = False if not self.vocab_file else True _A : Optional[int] = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens]) self.add_special_tokens({"additional_special_tokens": _additional_special_tokens}) _A : Union[str, Any] = { lang_code: self.convert_tokens_to_ids(__lowerCamelCase) for lang_code in FAIRSEQ_LANGUAGE_CODES } _A : Optional[int] = src_lang if src_lang is not None else "en_XX" _A : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang) _A : int = tgt_lang self.set_src_lang_special_tokens(self._src_lang) @property def _lowerCamelCase ( self) -> str: return self._src_lang @src_lang.setter def _lowerCamelCase ( self , __lowerCamelCase) -> None: _A : Dict = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]: _A : List[str] = [self.sep_token_id] _A : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) -> Dict: if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model") _A : str = src_lang _A : Any = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase) _A : Tuple = self.convert_tokens_to_ids(__lowerCamelCase) _A : Dict = tgt_lang_id return inputs def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = "en_XX" , __lowerCamelCase = None , __lowerCamelCase = "ro_RO" , **__lowerCamelCase , ) -> BatchEncoding: _A : Any = src_lang _A : int = tgt_lang return super().prepare_seqaseq_batch(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) def _lowerCamelCase ( self) -> List[str]: return self.set_src_lang_special_tokens(self.src_lang) def _lowerCamelCase ( self) -> List[Any]: return self.set_tgt_lang_special_tokens(self.tgt_lang) def _lowerCamelCase ( self , __lowerCamelCase) -> None: _A : int = self.convert_tokens_to_ids(__lowerCamelCase) _A : int = [] _A : List[str] = [self.eos_token_id, self.cur_lang_code] _A : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens) _A : str = self.convert_ids_to_tokens(self.suffix_tokens) _A : List[Any] = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , ) def _lowerCamelCase ( self , __lowerCamelCase) -> None: _A : Optional[int] = self.convert_tokens_to_ids(__lowerCamelCase) _A : List[Any] = [] _A : str = [self.eos_token_id, self.cur_lang_code] _A : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens) _A : int = self.convert_ids_to_tokens(self.suffix_tokens) _A : str = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , ) def 
_lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer.") if not os.path.isdir(__lowerCamelCase): logger.error(F"Vocabulary path ({save_directory}) should be a directory.") return _A : int = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(__lowerCamelCase): copyfile(self.vocab_file , __lowerCamelCase) return (out_vocab_file,)
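The tokenizer class above looks like a renamed copy of `MBartTokenizerFast` from transformers, which appends the source language code after the end-of-sentence token. Under that assumption, a brief usage sketch for the English-to-Romanian checkpoint listed in the pretrained map above might be:

from transformers import MBartTokenizerFast

# src_lang / tgt_lang select which FAIRSEQ language code is appended as a suffix token.
tok = MBartTokenizerFast.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)

# Encoding a source sentence should append </s> followed by the source language code.
enc = tok("UN Chief Says There Is No Military Solution in Syria")
print(tok.convert_ids_to_tokens(enc["input_ids"])[-2:])  # expected: ['</s>', 'en_XX']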
11
0
import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: SCREAMING_SNAKE_CASE :List[str] = None SCREAMING_SNAKE_CASE :str = logging.get_logger(__name__) SCREAMING_SNAKE_CASE :Union[str, Any] = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} SCREAMING_SNAKE_CASE :Dict = { 'vocab_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model' ), }, 'tokenizer_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json' ), }, } SCREAMING_SNAKE_CASE :List[Any] = { 'facebook/nllb-large-en-ro': 1024, 'facebook/nllb-200-distilled-600M': 1024, } # fmt: off SCREAMING_SNAKE_CASE :int = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn'] class UpperCAmelCase ( __SCREAMING_SNAKE_CASE 
): '''simple docstring''' snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = ["input_ids", "attention_mask"] snake_case_ = NllbTokenizer snake_case_ = [] snake_case_ = [] def __init__( self : List[str] ,A : Any=None ,A : Tuple=None ,A : Tuple="<s>" ,A : Any="</s>" ,A : Dict="</s>" ,A : Tuple="<s>" ,A : Any="<unk>" ,A : Any="<pad>" ,A : List[Any]="<mask>" ,A : Optional[Any]=None ,A : Optional[Any]=None ,A : Optional[Any]=None ,A : int=False ,**A : int ,): # Mask token behave like a normal word, i.e. include the space before it __A = AddedToken(__lowerCamelCase ,lstrip=__lowerCamelCase ,rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase ,__lowerCamelCase ) else mask_token __A = legacy_behaviour super().__init__( vocab_file=__lowerCamelCase ,tokenizer_file=__lowerCamelCase ,bos_token=__lowerCamelCase ,eos_token=__lowerCamelCase ,sep_token=__lowerCamelCase ,cls_token=__lowerCamelCase ,unk_token=__lowerCamelCase ,pad_token=__lowerCamelCase ,mask_token=__lowerCamelCase ,src_lang=__lowerCamelCase ,tgt_lang=__lowerCamelCase ,additional_special_tokens=__lowerCamelCase ,legacy_behaviour=__lowerCamelCase ,**__lowerCamelCase ,) __A = vocab_file __A = False if not self.vocab_file else True __A = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. _additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} ) __A = { lang_code: self.convert_tokens_to_ids(__lowerCamelCase ) for lang_code in FAIRSEQ_LANGUAGE_CODES } __A = src_lang if src_lang is not None else "eng_Latn" __A = self.convert_tokens_to_ids(self._src_lang ) __A = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def UpperCamelCase_ ( self : str ): return self._src_lang @src_lang.setter def UpperCamelCase_ ( self : Tuple ,A : Optional[Any] ): __A = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def UpperCamelCase_ ( self : str ,A : List[Any] ,A : Any = None ): if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def UpperCamelCase_ ( self : Tuple ,A : List[Any] ,A : Optional[int] = None ): __A = [self.sep_token_id] __A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def UpperCamelCase_ ( self : List[Any] ,A : List[Any] ,A : Optional[int] ,A : Tuple ,A : List[str] ,**A : Optional[Any] ): if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" ) __A = src_lang __A = self(__lowerCamelCase ,add_special_tokens=__lowerCamelCase ,return_tensors=__lowerCamelCase ,**__lowerCamelCase ) __A = self.convert_tokens_to_ids(__lowerCamelCase ) __A = tgt_lang_id return inputs def UpperCamelCase_ ( self : Tuple ,A : List[Any] ,A : str = "eng_Latn" ,A : Union[str, Any] = None ,A : str = "fra_Latn" ,**A : Dict ,): __A = src_lang __A = tgt_lang return super().prepare_seqaseq_batch(__lowerCamelCase ,__lowerCamelCase ,**__lowerCamelCase ) def UpperCamelCase_ ( self : Optional[Any] ): return self.set_src_lang_special_tokens(self.src_lang ) def UpperCamelCase_ ( self 
: Tuple ): return self.set_tgt_lang_special_tokens(self.tgt_lang ) def UpperCamelCase_ ( self : Optional[int] ,A : Tuple ): __A = self.convert_tokens_to_ids(__lowerCamelCase ) if self.legacy_behaviour: __A = [] __A = [self.eos_token_id, self.cur_lang_code] else: __A = [self.cur_lang_code] __A = [self.eos_token_id] __A = self.convert_ids_to_tokens(self.prefix_tokens ) __A = self.convert_ids_to_tokens(self.suffix_tokens ) __A = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str ,pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str ,special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str ,self.prefix_tokens + self.suffix_tokens ) ) ,) def UpperCamelCase_ ( self : Dict ,A : Optional[int] ): __A = self.convert_tokens_to_ids(__lowerCamelCase ) if self.legacy_behaviour: __A = [] __A = [self.eos_token_id, self.cur_lang_code] else: __A = [self.cur_lang_code] __A = [self.eos_token_id] __A = self.convert_ids_to_tokens(self.prefix_tokens ) __A = self.convert_ids_to_tokens(self.suffix_tokens ) __A = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str ,pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str ,special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str ,self.prefix_tokens + self.suffix_tokens ) ) ,) def UpperCamelCase_ ( self : int ,A : str ,A : Dict = None ): if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(__lowerCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' ) return __A = os.path.join( __lowerCamelCase ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ): copyfile(self.vocab_file ,__lowerCamelCase ) return (out_vocab_file,)
15
import json import os from typing import Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { 'vocab_file': 'vocab.json', 'tokenizer_config_file': 'tokenizer_config.json', 'merges_file': 'merges.txt', } lowerCAmelCase__ = { 'vocab_file': { 'facebook/s2t-wav2vec2-large-en-de': ( 'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json' ), }, 'tokenizer_config_file': { 'facebook/s2t-wav2vec2-large-en-de': ( 'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json' ), }, 'merges_file': { 'facebook/s2t-wav2vec2-large-en-de': ( 'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt' ), }, } lowerCAmelCase__ = '</w>' lowerCAmelCase__ = '@@ ' def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] ): _A : Optional[int] = set() _A : Optional[Any] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _A : List[Any] = char return pairs # Speech2Text2 has no max input length lowerCAmelCase__ = {'facebook/s2t-wav2vec2-large-en-de': 10_24} class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"] def __init__( self , __lowerCamelCase , __lowerCamelCase="<s>" , __lowerCamelCase="<pad>" , __lowerCamelCase="</s>" , __lowerCamelCase="<unk>" , __lowerCamelCase=False , __lowerCamelCase=None , **__lowerCamelCase , ) -> Optional[Any]: super().__init__( unk_token=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , pad_token=__lowerCamelCase , do_lower_case=__lowerCamelCase , **__lowerCamelCase , ) _A : Dict = do_lower_case with open(__lowerCamelCase , encoding="utf-8") as vocab_handle: _A : Optional[int] = json.load(__lowerCamelCase) _A : Optional[Any] = {v: k for k, v in self.encoder.items()} if merges_file is None: logger.info(F"No merges files provided. 
{self.__class__.__name__} can only be used for decoding.") _A : Optional[Any] = None _A : Tuple = None else: with open(__lowerCamelCase , encoding="utf-8") as merges_handle: _A : Optional[int] = merges_handle.read().split("\n")[:-1] _A : Union[str, Any] = [tuple(merge.split()[:2]) for merge in merges] _A : Optional[int] = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase)))) _A : List[Any] = {} @property def _lowerCamelCase ( self) -> int: return len(self.decoder) def _lowerCamelCase ( self) -> Dict: return dict(self.encoder , **self.added_tokens_encoder) def _lowerCamelCase ( self , __lowerCamelCase) -> Dict: _A : Tuple = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,) if token in self.cache: return self.cache[token] _A : int = get_pairs(__lowerCamelCase) if not pairs: return token while True: _A : Any = min(__lowerCamelCase , key=lambda __lowerCamelCase: self.bpe_ranks.get(__lowerCamelCase , float("inf"))) if bigram not in self.bpe_ranks: break _A , _A : Optional[int] = bigram _A : int = [] _A : str = 0 while i < len(__lowerCamelCase): try: _A : str = word.index(__lowerCamelCase , __lowerCamelCase) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) _A : str = j if word[i] == first and i < len(__lowerCamelCase) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 _A : List[str] = tuple(__lowerCamelCase) _A : List[str] = new_word if len(__lowerCamelCase) == 1: break else: _A : List[Any] = get_pairs(__lowerCamelCase) _A : Tuple = " ".join(__lowerCamelCase) if word == "\n " + BPE_TOKEN_MERGES: _A : List[str] = "\n" + BPE_TOKEN_MERGES if word.endswith(__lowerCamelCase): _A : int = word.replace(__lowerCamelCase , "") _A : int = word.replace(" " , __lowerCamelCase) _A : Union[str, Any] = word return word def _lowerCamelCase ( self , __lowerCamelCase) -> Optional[Any]: if self.bpe_ranks is None: raise ValueError( "This tokenizer was instantiated without a `merges.txt` file, so" " that it can only be used for decoding, not for encoding." 
"Make sure to provide `merges.txt` file at instantiation to enable " "encoding.") if self.do_lower_case: _A : List[Any] = text.lower() _A : Optional[int] = text.split() _A : List[str] = [] for token in text: if token: split_tokens.extend(list(self.bpe(__lowerCamelCase).split(" "))) return split_tokens def _lowerCamelCase ( self , __lowerCamelCase) -> int: return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token)) def _lowerCamelCase ( self , __lowerCamelCase) -> str: _A : List[str] = self.decoder.get(__lowerCamelCase , self.unk_token) return result def _lowerCamelCase ( self , __lowerCamelCase) -> str: _A : str = " ".join(__lowerCamelCase) # make sure @@ tokens are concatenated _A : int = "".join(string.split(__lowerCamelCase)) return string def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]: if not os.path.isdir(__lowerCamelCase): logger.error(F"Vocabulary path ({save_directory}) should be a directory") return _A : Any = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) _A : Any = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]) with open(__lowerCamelCase , "w" , encoding="utf-8") as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase) + "\n") _A : Union[str, Any] = 0 if self.bpe_ranks is None: return (vocab_file,) with open(__lowerCamelCase , "w" , encoding="utf-8") as writer: for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase: kv[1]): if index != token_index: logger.warning( F"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!") _A : Optional[int] = token_index writer.write(" ".join(__lowerCamelCase) + "\n") index += 1 return (vocab_file, merges_file)
11
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available lowerCamelCase__ = {"""configuration_speech_encoder_decoder""": ["""SpeechEncoderDecoderConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ["""SpeechEncoderDecoderModel"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ["""FlaxSpeechEncoderDecoderModel"""] if TYPE_CHECKING: from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
302
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { 'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json', # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae } class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = "vit_mae" def __init__( self , __lowerCamelCase=7_6_8 , __lowerCamelCase=1_2 , __lowerCamelCase=1_2 , __lowerCamelCase=3_0_7_2 , __lowerCamelCase="gelu" , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.0_2 , __lowerCamelCase=1e-12 , __lowerCamelCase=2_2_4 , __lowerCamelCase=1_6 , __lowerCamelCase=3 , __lowerCamelCase=True , __lowerCamelCase=1_6 , __lowerCamelCase=5_1_2 , __lowerCamelCase=8 , __lowerCamelCase=2_0_4_8 , __lowerCamelCase=0.7_5 , __lowerCamelCase=False , **__lowerCamelCase , ) -> int: super().__init__(**__lowerCamelCase) _A : int = hidden_size _A : List[str] = num_hidden_layers _A : List[Any] = num_attention_heads _A : Optional[Any] = intermediate_size _A : Optional[int] = hidden_act _A : List[Any] = hidden_dropout_prob _A : List[Any] = attention_probs_dropout_prob _A : Union[str, Any] = initializer_range _A : str = layer_norm_eps _A : Any = image_size _A : int = patch_size _A : int = num_channels _A : Dict = qkv_bias _A : Tuple = decoder_num_attention_heads _A : Tuple = decoder_hidden_size _A : List[str] = decoder_num_hidden_layers _A : Optional[Any] = decoder_intermediate_size _A : List[str] = mask_ratio _A : Union[str, Any] = norm_pix_loss
11
0
"""simple docstring""" import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class lowercase__ : def __init__( self : Union[str, Any] , snake_case__ : int , snake_case__ : Union[str, Any]=13 , snake_case__ : Dict=32 , snake_case__ : str=2 , snake_case__ : List[Any]=3 , snake_case__ : Optional[Any]=16 , snake_case__ : Optional[Any]=[1, 2, 1] , snake_case__ : Union[str, Any]=[2, 2, 4] , snake_case__ : List[Any]=2 , snake_case__ : Optional[Any]=2.0 , snake_case__ : List[Any]=True , snake_case__ : List[Any]=0.0 , snake_case__ : List[Any]=0.0 , snake_case__ : List[str]=0.1 , snake_case__ : Optional[int]="gelu" , snake_case__ : Dict=False , snake_case__ : int=True , snake_case__ : Any=0.02 , snake_case__ : str=1E-5 , snake_case__ : List[Any]=True , snake_case__ : Tuple=None , snake_case__ : Union[str, Any]=True , snake_case__ : Optional[Any]=10 , snake_case__ : List[str]=8 , snake_case__ : Tuple=["stage1", "stage2", "stage3"] , snake_case__ : List[str]=[1, 2, 3] , ): lowerCamelCase_ : int =parent lowerCamelCase_ : Optional[Any] =batch_size lowerCamelCase_ : str =image_size lowerCamelCase_ : Tuple =patch_size lowerCamelCase_ : Tuple =num_channels lowerCamelCase_ : Optional[int] =embed_dim lowerCamelCase_ : Dict =depths lowerCamelCase_ : Any =num_heads lowerCamelCase_ : Any =window_size lowerCamelCase_ : int =mlp_ratio lowerCamelCase_ : Any =qkv_bias lowerCamelCase_ : Union[str, Any] =hidden_dropout_prob lowerCamelCase_ : Optional[Any] =attention_probs_dropout_prob lowerCamelCase_ : Dict =drop_path_rate lowerCamelCase_ : List[Any] =hidden_act lowerCamelCase_ : Any =use_absolute_embeddings lowerCamelCase_ : Optional[int] =patch_norm lowerCamelCase_ : Tuple =layer_norm_eps lowerCamelCase_ : List[str] =initializer_range lowerCamelCase_ : Optional[int] =is_training lowerCamelCase_ : Optional[Any] =scope lowerCamelCase_ : Optional[int] =use_labels lowerCamelCase_ : Dict =type_sequence_label_size lowerCamelCase_ : str =encoder_stride lowerCamelCase_ : Optional[int] =out_features lowerCamelCase_ : Optional[int] =out_indices def UpperCAmelCase__ ( self : Tuple ): lowerCamelCase_ : List[str] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase_ : Optional[Any] =None if self.use_labels: lowerCamelCase_ : Any =ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase_ : Optional[int] =self.get_config() return config, pixel_values, labels def UpperCAmelCase__ ( self : Dict ): return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , 
hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def UpperCAmelCase__ ( self : Optional[Any] , snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] ): lowerCamelCase_ : Dict =MaskFormerSwinModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase_ : int =model(__lowerCamelCase ) lowerCamelCase_ : Any =((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) lowerCamelCase_ : List[str] =int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def UpperCAmelCase__ ( self : Any , snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : List[str] ): lowerCamelCase_ : Optional[Any] =MaskFormerSwinBackbone(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase_ : Dict =model(__lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [16, 32, 64] ) # verify ValueError with self.parent.assertRaises(__lowerCamelCase ): lowerCamelCase_ : Union[str, Any] =["stem"] lowerCamelCase_ : Union[str, Any] =MaskFormerSwinBackbone(config=__lowerCamelCase ) def UpperCAmelCase__ ( self : Union[str, Any] ): lowerCamelCase_ : Any =self.prepare_config_and_inputs() lowerCamelCase_ : List[Any] =config_and_inputs lowerCamelCase_ : Optional[int] ={"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowercase__ ( snake_case__, snake_case__, unittest.TestCase ): _UpperCAmelCase :Tuple = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) _UpperCAmelCase :Dict = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {} _UpperCAmelCase :Union[str, Any] = False _UpperCAmelCase :Tuple = False _UpperCAmelCase :Optional[int] = False _UpperCAmelCase :Any = False _UpperCAmelCase :Dict = False def UpperCAmelCase__ ( self : Union[str, Any] ): lowerCamelCase_ : Union[str, Any] =MaskFormerSwinModelTester(self ) lowerCamelCase_ : Optional[int] =ConfigTester(self , config_class=__lowerCamelCase , embed_dim=37 ) @require_torch_multi_gpu @unittest.skip( reason=( "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with" " `nn.DataParallel`" ) ) def UpperCAmelCase__ ( self : str ): pass def UpperCAmelCase__ ( self : Tuple ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCAmelCase__ ( self : Tuple ): return def UpperCAmelCase__ ( self : Dict ): lowerCamelCase_ : Optional[int] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def UpperCAmelCase__ ( self : 
List[str] ): lowerCamelCase_ : Any =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__lowerCamelCase ) @unittest.skip("Swin does not use inputs_embeds" ) def UpperCAmelCase__ ( self : Any ): pass @unittest.skip("Swin does not support feedforward chunking" ) def UpperCAmelCase__ ( self : Union[str, Any] ): pass def UpperCAmelCase__ ( self : Any ): lowerCamelCase_ : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ : Union[str, Any] =model_class(__lowerCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCamelCase_ : Dict =model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) ) def UpperCAmelCase__ ( self : Optional[Any] ): lowerCamelCase_ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ : int =model_class(__lowerCamelCase ) lowerCamelCase_ : Optional[int] =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase_ : int =[*signature.parameters.keys()] lowerCamelCase_ : Optional[int] =["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions" ) def UpperCAmelCase__ ( self : Optional[int] ): pass @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone" ) def UpperCAmelCase__ ( self : List[str] ): pass def UpperCAmelCase__ ( self : List[Any] , snake_case__ : int , snake_case__ : Dict , snake_case__ : Optional[Any] , snake_case__ : Any ): lowerCamelCase_ : Any =model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): lowerCamelCase_ : str =model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) lowerCamelCase_ : Tuple =outputs.hidden_states lowerCamelCase_ : Any =getattr( self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase ) # Swin has a different seq_length lowerCamelCase_ : Optional[int] =( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCamelCase_ : str =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def UpperCAmelCase__ ( self : str ): lowerCamelCase_ : List[str] =self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ : Any =( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: lowerCamelCase_ : List[Any] =True self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase_ : Optional[int] =True self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) def UpperCAmelCase__ ( self : List[str] ): lowerCamelCase_ : List[str] =self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ : Optional[int] =3 lowerCamelCase_ : Dict =( self.model_tester.image_size if 
isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) lowerCamelCase_ : Optional[int] =( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCamelCase_ : int =image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) lowerCamelCase_ : Dict =image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: lowerCamelCase_ : List[Any] =True self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase_ : Union[str, Any] =True self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , (padded_height, padded_width) ) @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints" ) def UpperCAmelCase__ ( self : Dict ): pass @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" ) def UpperCAmelCase__ ( self : str ): pass @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" ) def UpperCAmelCase__ ( self : Union[str, Any] ): pass def UpperCAmelCase__ ( self : Optional[int] ): lowerCamelCase_ : Any =self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(snake_case__ : str ): lowerCamelCase_ : Optional[int] =0 return t def check_equivalence(snake_case__ : int , snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : List[Any]={} ): with torch.no_grad(): lowerCamelCase_ : Any =model(**__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase ) lowerCamelCase_ : int =model(**__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase ).to_tuple() def recursive_check(snake_case__ : Any , snake_case__ : Optional[int] ): if isinstance(__lowerCamelCase , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(__lowerCamelCase , __lowerCamelCase ): recursive_check(__lowerCamelCase , __lowerCamelCase ) elif isinstance(__lowerCamelCase , __lowerCamelCase ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(__lowerCamelCase , __lowerCamelCase ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(__lowerCamelCase ) , set_nan_tensor_to_zero(__lowerCamelCase ) , atol=1E-5 ) , msg=( "Tuple and dict output are not equal. Difference:" F""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:""" F""" {torch.isnan(__lowerCamelCase ).any()} and `inf`: {torch.isinf(__lowerCamelCase )}. 
Dict has""" F""" `nan`: {torch.isnan(__lowerCamelCase ).any()} and `inf`: {torch.isinf(__lowerCamelCase )}.""" ) , ) recursive_check(__lowerCamelCase , __lowerCamelCase ) for model_class in self.all_model_classes: lowerCamelCase_ : List[Any] =model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase_ : str =self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase_ : Tuple =self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) lowerCamelCase_ : Any =self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) lowerCamelCase_ : List[Any] =self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) lowerCamelCase_ : List[Any] =self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase_ : str =self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True} ) lowerCamelCase_ : Union[str, Any] =self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) lowerCamelCase_ : Optional[Any] =self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True} ) @require_torch class lowercase__ ( unittest.TestCase, snake_case__ ): _UpperCAmelCase :Optional[Any] = (MaskFormerSwinBackbone,) if is_torch_available() else () _UpperCAmelCase :List[str] = MaskFormerSwinConfig def UpperCAmelCase__ ( self : Dict ): lowerCamelCase_ : Tuple =MaskFormerSwinModelTester(self ) def UpperCAmelCase__ ( self : List[str] ): lowerCamelCase_ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ : Union[str, Any] =inputs_dict["pixel_values"].shape[0] for backbone_class in self.all_model_classes: lowerCamelCase_ : Optional[Any] =backbone_class(__lowerCamelCase ) backbone.to(__lowerCamelCase ) backbone.eval() lowerCamelCase_ : List[Any] =backbone(**__lowerCamelCase ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , __lowerCamelCase ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True lowerCamelCase_ : List[str] =backbone(**__lowerCamelCase , output_hidden_states=__lowerCamelCase ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) lowerCamelCase_ : List[str] =hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: lowerCamelCase_ : int =backbone(**__lowerCamelCase , output_attentions=__lowerCamelCase ) self.assertIsNotNone(outputs.attentions )
144
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available lowerCAmelCase__ = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['SpeechEncoderDecoderModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['FlaxSpeechEncoderDecoderModel'] if TYPE_CHECKING: from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
11
0
import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class lowerCamelCase__( unittest.TestCase): @property def lowerCAmelCase__ ( self: Optional[int] ): torch.manual_seed(0 ) __lowerCamelCase = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , ) return model @property def lowerCAmelCase__ ( self: Optional[Any] ): torch.manual_seed(0 ) __lowerCamelCase = VQModel( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , ) return model @property def lowerCAmelCase__ ( self: Any ): torch.manual_seed(0 ) __lowerCamelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) return CLIPTextModel(UpperCamelCase_ ) def lowerCAmelCase__ ( self: str ): __lowerCamelCase = self.dummy_uncond_unet __lowerCamelCase = DDIMScheduler() __lowerCamelCase = self.dummy_vq_model __lowerCamelCase = LDMPipeline(unet=UpperCamelCase_ , vqvae=UpperCamelCase_ , scheduler=UpperCamelCase_ ) ldm.to(UpperCamelCase_ ) ldm.set_progress_bar_config(disable=UpperCamelCase_ ) __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = ldm(generator=UpperCamelCase_ , num_inference_steps=2 , output_type="""numpy""" ).images __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = ldm(generator=UpperCamelCase_ , num_inference_steps=2 , output_type="""numpy""" , return_dict=UpperCamelCase_ )[0] __lowerCamelCase = image[0, -3:, -3:, -1] __lowerCamelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __lowerCamelCase = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] ) __lowerCamelCase = 1E-2 if torch_device != """mps""" else 3E-2 assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance @slow @require_torch class lowerCamelCase__( unittest.TestCase): def lowerCAmelCase__ ( self: Dict ): __lowerCamelCase = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" ) ldm.to(UpperCamelCase_ ) ldm.set_progress_bar_config(disable=UpperCamelCase_ ) __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = ldm(generator=UpperCamelCase_ , num_inference_steps=5 , output_type="""numpy""" ).images __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 2_56, 2_56, 3) __lowerCamelCase = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] ) __lowerCamelCase = 1E-2 if torch_device != """mps""" else 3E-2 assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
12
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase): UpperCAmelCase__ : Union[str, Any] = StableDiffusionInpaintPipeline UpperCAmelCase__ : Dict = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS UpperCAmelCase__ : str = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS UpperCAmelCase__ : int = frozenset( []) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess UpperCAmelCase__ : Union[str, Any] = frozenset([]) def lowerCAmelCase__ ( self: str ): torch.manual_seed(0 ) __lowerCamelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCamelCase_ , ) __lowerCamelCase = PNDMScheduler(skip_prk_steps=UpperCamelCase_ ) torch.manual_seed(0 ) __lowerCamelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_28 , ) torch.manual_seed(0 ) __lowerCamelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , ) __lowerCamelCase = CLIPTextModel(UpperCamelCase_ ) __lowerCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) __lowerCamelCase = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Any , UpperCamelCase_: List[Any]=0 ): # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched __lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ ) __lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0] __lowerCamelCase = Image.fromarray(np.uinta(UpperCamelCase_ ) ).convert("""RGB""" ).resize((64, 64) ) __lowerCamelCase = Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((64, 64) ) if str(UpperCamelCase_ ).startswith("""mps""" ): __lowerCamelCase = torch.manual_seed(UpperCamelCase_ ) else: __lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ ) __lowerCamelCase = { """prompt""": """A painting of a squirrel eating a burger""", """image""": init_image, """mask_image""": mask_image, """generator""": generator, 
"""num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def lowerCAmelCase__ ( self: str ): __lowerCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = StableDiffusionInpaintPipeline(**UpperCamelCase_ ) __lowerCamelCase = sd_pipe.to(UpperCamelCase_ ) sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ ) __lowerCamelCase = self.get_dummy_inputs(UpperCamelCase_ ) __lowerCamelCase = sd_pipe(**UpperCamelCase_ ).images __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __lowerCamelCase = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def lowerCAmelCase__ ( self: int ): super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class lowerCamelCase__( unittest.TestCase): def lowerCAmelCase__ ( self: str ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase__ ( self: List[Any] ): __lowerCamelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-inpaint/init_image.png""" ) __lowerCamelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" ) __lowerCamelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint""" """/yellow_cat_sitting_on_a_park_bench.npy""" ) __lowerCamelCase = """stabilityai/stable-diffusion-2-inpainting""" __lowerCamelCase = StableDiffusionInpaintPipeline.from_pretrained(UpperCamelCase_ , safety_checker=UpperCamelCase_ ) pipe.to(UpperCamelCase_ ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) pipe.enable_attention_slicing() __lowerCamelCase = """Face of a yellow cat, high resolution, sitting on a park bench""" __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = pipe( prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , generator=UpperCamelCase_ , output_type="""np""" , ) __lowerCamelCase = output.images[0] assert image.shape == (5_12, 5_12, 3) assert np.abs(expected_image - image ).max() < 9E-3 def lowerCAmelCase__ ( self: Optional[int] ): __lowerCamelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-inpaint/init_image.png""" ) __lowerCamelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" ) __lowerCamelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint""" """/yellow_cat_sitting_on_a_park_bench_fp16.npy""" ) __lowerCamelCase = """stabilityai/stable-diffusion-2-inpainting""" __lowerCamelCase = StableDiffusionInpaintPipeline.from_pretrained( UpperCamelCase_ , torch_dtype=torch.floataa , safety_checker=UpperCamelCase_ , ) pipe.to(UpperCamelCase_ ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) pipe.enable_attention_slicing() __lowerCamelCase = """Face of a yellow cat, high resolution, sitting on a park bench""" __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = pipe( prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , generator=UpperCamelCase_ , output_type="""np""" , ) __lowerCamelCase = output.images[0] assert image.shape == (5_12, 
5_12, 3) assert np.abs(expected_image - image ).max() < 5E-1 def lowerCAmelCase__ ( self: int ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __lowerCamelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-inpaint/init_image.png""" ) __lowerCamelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" ) __lowerCamelCase = """stabilityai/stable-diffusion-2-inpainting""" __lowerCamelCase = PNDMScheduler.from_pretrained(UpperCamelCase_ , subfolder="""scheduler""" ) __lowerCamelCase = StableDiffusionInpaintPipeline.from_pretrained( UpperCamelCase_ , safety_checker=UpperCamelCase_ , scheduler=UpperCamelCase_ , torch_dtype=torch.floataa , ) pipe.to(UpperCamelCase_ ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() __lowerCamelCase = """Face of a yellow cat, high resolution, sitting on a park bench""" __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = pipe( prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=2 , output_type="""np""" , ) __lowerCamelCase = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 10**9
12
1
import inspect import logging import os import random import shutil import tempfile import unittest import pytest import torch from torch import nn from torch.utils.data import DataLoader, TensorDataset from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_cuda from accelerate.utils import ProjectConfiguration, set_seed UpperCAmelCase_ = logging.getLogger(__name__) def lowerCamelCase__ ( A__ : Union[str, Any]=2 , A__ : List[Any]=3 , A__ : Optional[int]=16 , A__ : int = 10 , A__ : int = 2 ): '''simple docstring''' def get_dataset(A__ : Union[str, Any] ): __lowerCamelCase = torch.randn(batch_size * n_batches , 1 ) return TensorDataset(A__ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) ) __lowerCamelCase = get_dataset(A__ ) __lowerCamelCase = get_dataset(A__ ) __lowerCamelCase = DataLoader(A__ , shuffle=A__ , batch_size=A__ , num_workers=4 ) __lowerCamelCase = DataLoader(A__ , shuffle=A__ , batch_size=A__ , num_workers=4 ) return (train_dataloader, valid_dataloader) def lowerCamelCase__ ( A__ : List[str] , A__ : List[str] , A__ : Optional[Any] , A__ : Tuple , A__ : Dict , A__ : Union[str, Any]=None ): '''simple docstring''' __lowerCamelCase = [] for epoch in range(A__ ): # Train quickly model.train() for batch in dataloader: __lowerCamelCase, __lowerCamelCase = batch __lowerCamelCase = model(A__ ) __lowerCamelCase = torch.nn.functional.mse_loss(A__ , A__ ) accelerator.backward(A__ ) optimizer.step() optimizer.zero_grad() rands.append(random.random() ) # Introduce some randomness if scheduler is not None: scheduler.step() return rands class lowerCamelCase__( nn.Module): def __init__( self: Dict ): super().__init__() __lowerCamelCase = nn.Parameter(torch.randn(1 ) ) __lowerCamelCase = nn.Parameter(torch.randn(1 ) ) def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Union[str, Any] ): return x * self.a + self.b class lowerCamelCase__( unittest.TestCase): def lowerCAmelCase__ ( self: int ): with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __lowerCamelCase = DummyModel() __lowerCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) __lowerCamelCase, __lowerCamelCase = dummy_dataloaders() __lowerCamelCase = ProjectConfiguration(total_limit=1 , project_dir=UpperCamelCase_ , automatic_checkpoint_naming=UpperCamelCase_ ) # Train baseline __lowerCamelCase = Accelerator(project_config=UpperCamelCase_ ) __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = accelerator.prepare( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # Save initial accelerator.save_state() # Save second state accelerator.save_state() self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 ) def lowerCAmelCase__ ( self: Optional[int] ): with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __lowerCamelCase = DummyModel() __lowerCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) __lowerCamelCase, __lowerCamelCase = dummy_dataloaders() # Train baseline __lowerCamelCase = Accelerator() __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = accelerator.prepare( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # Save initial __lowerCamelCase = os.path.join(UpperCamelCase_ , """initial""" ) accelerator.save_state(UpperCamelCase_ ) ((__lowerCamelCase), (__lowerCamelCase)) = model.a.item(), model.b.item() __lowerCamelCase = optimizer.state_dict() __lowerCamelCase = train(3 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , 
UpperCamelCase_ ) ((__lowerCamelCase), (__lowerCamelCase)) = model.a.item(), model.b.item() __lowerCamelCase = optimizer.state_dict() # Train partially set_seed(42 ) __lowerCamelCase = DummyModel() __lowerCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) __lowerCamelCase, __lowerCamelCase = dummy_dataloaders() __lowerCamelCase = Accelerator() __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = accelerator.prepare( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) accelerator.load_state(UpperCamelCase_ ) ((__lowerCamelCase), (__lowerCamelCase)) = model.a.item(), model.b.item() __lowerCamelCase = optimizer.state_dict() self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) __lowerCamelCase = train(2 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # Save everything __lowerCamelCase = os.path.join(UpperCamelCase_ , """checkpoint""" ) accelerator.save_state(UpperCamelCase_ ) # Load everything back in and make sure all states work accelerator.load_state(UpperCamelCase_ ) test_rands += train(1 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) ((__lowerCamelCase), (__lowerCamelCase)) = model.a.item(), model.b.item() __lowerCamelCase = optimizer.state_dict() self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) def lowerCAmelCase__ ( self: List[str] ): with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __lowerCamelCase = DummyModel() __lowerCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) __lowerCamelCase, __lowerCamelCase = dummy_dataloaders() __lowerCamelCase = ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase_ ) # Train baseline __lowerCamelCase = Accelerator(project_dir=UpperCamelCase_ , project_config=UpperCamelCase_ ) __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = accelerator.prepare( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # Save initial accelerator.save_state() ((__lowerCamelCase), (__lowerCamelCase)) = model.a.item(), model.b.item() __lowerCamelCase = optimizer.state_dict() __lowerCamelCase = train(3 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) ((__lowerCamelCase), (__lowerCamelCase)) = model.a.item(), model.b.item() __lowerCamelCase = optimizer.state_dict() # Train partially set_seed(42 ) __lowerCamelCase = DummyModel() __lowerCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) __lowerCamelCase, __lowerCamelCase = dummy_dataloaders() __lowerCamelCase = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=UpperCamelCase_ ) __lowerCamelCase = Accelerator(project_dir=UpperCamelCase_ , project_config=UpperCamelCase_ ) __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = accelerator.prepare( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) accelerator.load_state(os.path.join(UpperCamelCase_ , """checkpoints""" , """checkpoint_0""" ) ) ((__lowerCamelCase), (__lowerCamelCase)) = model.a.item(), model.b.item() __lowerCamelCase = optimizer.state_dict() self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) __lowerCamelCase = train(2 , 
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # Save everything accelerator.save_state() # Load everything back in and make sure all states work accelerator.load_state(os.path.join(UpperCamelCase_ , """checkpoints""" , """checkpoint_1""" ) ) test_rands += train(1 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) ((__lowerCamelCase), (__lowerCamelCase)) = model.a.item(), model.b.item() __lowerCamelCase = optimizer.state_dict() self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) def lowerCAmelCase__ ( self: Optional[Any] ): __lowerCamelCase = torch.tensor([1, 2, 3] ) __lowerCamelCase = torch.tensor([2, 3, 4] ) __lowerCamelCase = DummyModel() __lowerCamelCase = torch.optim.Adam(net.parameters() ) __lowerCamelCase = Accelerator() with self.assertRaises(UpperCamelCase_ ) as ve: accelerator.register_for_checkpointing(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) __lowerCamelCase = str(ve.exception ) self.assertTrue("""Item at index 0""" in message ) self.assertTrue("""Item at index 1""" in message ) self.assertFalse("""Item at index 2""" in message ) self.assertFalse("""Item at index 3""" in message ) def lowerCAmelCase__ ( self: Optional[int] ): with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __lowerCamelCase = DummyModel() __lowerCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) __lowerCamelCase = torch.optim.lr_scheduler.StepLR(UpperCamelCase_ , step_size=1 , gamma=0.99 ) __lowerCamelCase, __lowerCamelCase = dummy_dataloaders() __lowerCamelCase = ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase_ ) # Train baseline __lowerCamelCase = Accelerator(project_dir=UpperCamelCase_ , project_config=UpperCamelCase_ ) __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = accelerator.prepare( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # Save initial accelerator.save_state() __lowerCamelCase = scheduler.state_dict() train(3 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) self.assertNotEqual(UpperCamelCase_ , scheduler.state_dict() ) # Load everything back in and make sure all states work accelerator.load_state(os.path.join(UpperCamelCase_ , """checkpoints""" , """checkpoint_0""" ) ) self.assertEqual(UpperCamelCase_ , scheduler.state_dict() ) def lowerCAmelCase__ ( self: Dict ): with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __lowerCamelCase = DummyModel() __lowerCamelCase = ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase_ , total_limit=2 ) # Train baseline __lowerCamelCase = Accelerator(project_dir=UpperCamelCase_ , project_config=UpperCamelCase_ ) __lowerCamelCase = accelerator.prepare(UpperCamelCase_ ) # Save 3 states: for _ in range(11 ): accelerator.save_state() self.assertTrue(not os.path.exists(os.path.join(UpperCamelCase_ , """checkpoints""" , """checkpoint_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , """checkpoints""" , """checkpoint_9""" ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , """checkpoints""" , """checkpoint_10""" ) ) ) @require_cuda def lowerCAmelCase__ ( self: Optional[Any] ): __lowerCamelCase = ["""torchrun""", F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )] 
execute_subprocess_async(UpperCamelCase_ , env=os.environ.copy() ) if __name__ == "__main__": UpperCAmelCase_ = '/tmp/accelerate/state_checkpointing' UpperCAmelCase_ = DummyModel() UpperCAmelCase_ = torch.optim.Adam(params=model.parameters(), lr=1E-3) UpperCAmelCase_ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99) UpperCAmelCase_ , UpperCAmelCase_ = dummy_dataloaders() UpperCAmelCase_ = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline UpperCAmelCase_ = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no') if accelerator.process_index == 0: if os.path.exists(savedir): shutil.rmtree(savedir) os.makedirs(savedir) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader, scheduler ) UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(model, optimizer) train(3, model, train_dataloader, optimizer, accelerator, scheduler) # Check that the intial optimizer is loaded on the GPU for group in optimizer.param_groups: UpperCAmelCase_ = group['params'][0].device break assert param_device.type == accelerator.device.type UpperCAmelCase_ = model.cpu() accelerator.wait_for_everyone() accelerator.save_state() accelerator.wait_for_everyone() # Check CPU state accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu') for group in optimizer.param_groups: UpperCAmelCase_ = group['params'][0].device break assert ( param_device.type == torch.device('cpu').type ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}" # Check device state model.to(accelerator.device) accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device') for group in optimizer.param_groups: UpperCAmelCase_ = group['params'][0].device break assert ( param_device.type == accelerator.device.type ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}" # Check error with pytest.raises(TypeError, match='Unsupported optimizer map location passed'): accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid') accelerator.wait_for_everyone() if accelerator.process_index == 0: shutil.rmtree(savedir) accelerator.wait_for_everyone()
12
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available UpperCAmelCase_ = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = ['MLukeTokenizer'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mluke import MLukeTokenizer else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
12
1
from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING UpperCAmelCase_ = logging.get_logger(__name__) @add_end_docstrings(__lowerCamelCase) class lowerCamelCase__( __lowerCamelCase): def __init__( self: List[str] , *UpperCamelCase_: Any , **UpperCamelCase_: Union[str, Any] ): super().__init__(*UpperCamelCase_ , **UpperCamelCase_ ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING if self.framework == """tf""" else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ) def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Tuple=None ): __lowerCamelCase = {} if top_k is not None: __lowerCamelCase = top_k return {}, {}, postprocess_params def __call__( self: Tuple , UpperCamelCase_: Union[str, List[str], "Image.Image", List["Image.Image"]] , **UpperCamelCase_: Optional[int] ): return super().__call__(UpperCamelCase_ , **UpperCamelCase_ ) def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Union[str, Any] ): __lowerCamelCase = load_image(UpperCamelCase_ ) __lowerCamelCase = self.image_processor(images=UpperCamelCase_ , return_tensors=self.framework ) return model_inputs def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Any ): __lowerCamelCase = self.model(**UpperCamelCase_ ) return model_outputs def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: List[str] , UpperCamelCase_: Union[str, Any]=5 ): if top_k > self.model.config.num_labels: __lowerCamelCase = self.model.config.num_labels if self.framework == "pt": __lowerCamelCase = model_outputs.logits.softmax(-1 )[0] __lowerCamelCase, __lowerCamelCase = probs.topk(UpperCamelCase_ ) elif self.framework == "tf": __lowerCamelCase = stable_softmax(model_outputs.logits , axis=-1 )[0] __lowerCamelCase = tf.math.top_k(UpperCamelCase_ , k=UpperCamelCase_ ) __lowerCamelCase, __lowerCamelCase = topk.values.numpy(), topk.indices.numpy() else: raise ValueError(F'Unsupported framework: {self.framework}' ) __lowerCamelCase = scores.tolist() __lowerCamelCase = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(UpperCamelCase_ , UpperCamelCase_ )]
12
import os import shutil from pathlib import Path from typing import Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging if is_onnx_available(): import onnxruntime as ort UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { 'tensor(bool)': np.bool_, 'tensor(int8)': np.inta, 'tensor(uint8)': np.uinta, 'tensor(int16)': np.intaa, 'tensor(uint16)': np.uintaa, 'tensor(int32)': np.intaa, 'tensor(uint32)': np.uintaa, 'tensor(int64)': np.intaa, 'tensor(uint64)': np.uintaa, 'tensor(float16)': np.floataa, 'tensor(float)': np.floataa, 'tensor(double)': np.floataa, } class lowerCamelCase__: def __init__( self: str , UpperCamelCase_: List[Any]=None , **UpperCamelCase_: str ): logger.info("""`diffusers.OnnxRuntimeModel` is experimental and might change in the future.""" ) __lowerCamelCase = model __lowerCamelCase = kwargs.get("""model_save_dir""" , UpperCamelCase_ ) __lowerCamelCase = kwargs.get("""latest_model_name""" , UpperCamelCase_ ) def __call__( self: Dict , **UpperCamelCase_: Any ): __lowerCamelCase = {k: np.array(UpperCamelCase_ ) for k, v in kwargs.items()} return self.model.run(UpperCamelCase_ , UpperCamelCase_ ) @staticmethod def lowerCAmelCase__ ( UpperCamelCase_: Union[str, Path] , UpperCamelCase_: Tuple=None , UpperCamelCase_: Tuple=None ): if provider is None: logger.info("""No onnxruntime provider specified, using CPUExecutionProvider""" ) __lowerCamelCase = """CPUExecutionProvider""" return ort.InferenceSession(UpperCamelCase_ , providers=[provider] , sess_options=UpperCamelCase_ ) def lowerCAmelCase__ ( self: int , UpperCamelCase_: Union[str, Path] , UpperCamelCase_: Optional[str] = None , **UpperCamelCase_: Optional[int] ): __lowerCamelCase = file_name if file_name is not None else ONNX_WEIGHTS_NAME __lowerCamelCase = self.model_save_dir.joinpath(self.latest_model_name ) __lowerCamelCase = Path(UpperCamelCase_ ).joinpath(UpperCamelCase_ ) try: shutil.copyfile(UpperCamelCase_ , UpperCamelCase_ ) except shutil.SameFileError: pass # copy external weights (for models >2GB) __lowerCamelCase = self.model_save_dir.joinpath(UpperCamelCase_ ) if src_path.exists(): __lowerCamelCase = Path(UpperCamelCase_ ).joinpath(UpperCamelCase_ ) try: shutil.copyfile(UpperCamelCase_ , UpperCamelCase_ ) except shutil.SameFileError: pass def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Union[str, os.PathLike] , **UpperCamelCase_: Optional[Any] , ): if os.path.isfile(UpperCamelCase_ ): logger.error(F'Provided path ({save_directory}) should be a directory, not a file' ) return os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ ) # saving model weights/files self._save_pretrained(UpperCamelCase_ , **UpperCamelCase_ ) @classmethod def lowerCAmelCase__ ( cls: str , UpperCamelCase_: Union[str, Path] , UpperCamelCase_: Optional[Union[bool, str, None]] = None , UpperCamelCase_: Optional[Union[str, None]] = None , UpperCamelCase_: bool = False , UpperCamelCase_: Optional[str] = None , UpperCamelCase_: Optional[str] = None , UpperCamelCase_: Optional[str] = None , UpperCamelCase_: Optional["ort.SessionOptions"] = None , **UpperCamelCase_: int , ): __lowerCamelCase = file_name if file_name is not None else ONNX_WEIGHTS_NAME # load model from local directory if os.path.isdir(UpperCamelCase_ ): __lowerCamelCase = OnnxRuntimeModel.load_model( os.path.join(UpperCamelCase_ , UpperCamelCase_ ) , provider=UpperCamelCase_ , sess_options=UpperCamelCase_ ) __lowerCamelCase = 
Path(UpperCamelCase_ ) # load model from hub else: # download model __lowerCamelCase = hf_hub_download( repo_id=UpperCamelCase_ , filename=UpperCamelCase_ , use_auth_token=UpperCamelCase_ , revision=UpperCamelCase_ , cache_dir=UpperCamelCase_ , force_download=UpperCamelCase_ , ) __lowerCamelCase = Path(UpperCamelCase_ ).parent __lowerCamelCase = Path(UpperCamelCase_ ).name __lowerCamelCase = OnnxRuntimeModel.load_model(UpperCamelCase_ , provider=UpperCamelCase_ , sess_options=UpperCamelCase_ ) return cls(model=UpperCamelCase_ , **UpperCamelCase_ ) @classmethod def lowerCAmelCase__ ( cls: Optional[int] , UpperCamelCase_: Union[str, Path] , UpperCamelCase_: bool = True , UpperCamelCase_: Optional[str] = None , UpperCamelCase_: Optional[str] = None , **UpperCamelCase_: int , ): __lowerCamelCase = None if len(str(UpperCamelCase_ ).split("""@""" ) ) == 2: __lowerCamelCase, __lowerCamelCase = model_id.split("""@""" ) return cls._from_pretrained( model_id=UpperCamelCase_ , revision=UpperCamelCase_ , cache_dir=UpperCamelCase_ , force_download=UpperCamelCase_ , use_auth_token=UpperCamelCase_ , **UpperCamelCase_ , )
12
1
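The wrapper above exposes an onnxruntime `InferenceSession` behind `from_pretrained`/`save_pretrained`/`__call__`. A small sketch of the intended flow, assuming the class is the one diffusers ships as `OnnxRuntimeModel`; the directory path and input names below are placeholders:

# Hedged sketch: loading an exported ONNX module and running it with NumPy inputs.
import numpy as np
from diffusers import OnnxRuntimeModel  # assumes the public diffusers export of the class above

onnx_model = OnnxRuntimeModel.from_pretrained(
    "./exported_onnx_model",          # local directory containing model.onnx (placeholder)
    provider="CPUExecutionProvider",  # forwarded to ort.InferenceSession via load_model()
)

# __call__ converts every keyword argument to a NumPy array and calls InferenceSession.run.
outputs = onnx_model(
    sample=np.zeros((1, 4, 64, 64), dtype=np.float32),  # placeholder input names and shapes
    timestep=np.array([1], dtype=np.int64),
)
print(len(outputs))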
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, is_batched, to_numpy_array, valid_images, ) from ...utils import TensorType, logging UpperCAmelCase_ = logging.get_logger(__name__) class lowerCamelCase__( __lowerCamelCase): UpperCAmelCase__ : Optional[int] = ['pixel_values'] def __init__( self: List[Any] , UpperCamelCase_: bool = True , UpperCamelCase_: Optional[Dict[str, int]] = None , UpperCamelCase_: PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase_: bool = True , UpperCamelCase_: bool = True , UpperCamelCase_: Union[int, float] = 1 / 2_55 , UpperCamelCase_: Dict[str, int] = None , UpperCamelCase_: bool = True , UpperCamelCase_: Optional[Union[float, List[float]]] = None , UpperCamelCase_: Optional[Union[float, List[float]]] = None , **UpperCamelCase_: List[str] , ): super().__init__(**UpperCamelCase_ ) __lowerCamelCase = size if size is not None else {"""height""": 2_24, """width""": 2_24} __lowerCamelCase = get_size_dict(UpperCamelCase_ ) __lowerCamelCase = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24} __lowerCamelCase = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ , param_name="""crop_size""" ) __lowerCamelCase = do_resize __lowerCamelCase = do_rescale __lowerCamelCase = do_normalize __lowerCamelCase = do_center_crop __lowerCamelCase = crop_size __lowerCamelCase = size __lowerCamelCase = resample __lowerCamelCase = rescale_factor __lowerCamelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN __lowerCamelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: np.ndarray , UpperCamelCase_: Dict[str, int] , UpperCamelCase_: PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_: Optional[int] , ): __lowerCamelCase = get_size_dict(UpperCamelCase_ ) if "shortest_edge" in size: __lowerCamelCase = get_resize_output_image_size(UpperCamelCase_ , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase_ ) # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"]) elif "height" in size and "width" in size: __lowerCamelCase = (size["""height"""], size["""width"""]) else: raise ValueError(F'Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}' ) return resize(UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: np.ndarray , UpperCamelCase_: Dict[str, int] , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_: Dict , ): __lowerCamelCase = get_size_dict(UpperCamelCase_ ) if "height" not in size or "width" not in size: raise ValueError(F'The `size` parameter must contain the keys (height, width). 
Got {size.keys()}' ) return center_crop(UpperCamelCase_ , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: np.ndarray , UpperCamelCase_: float , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_: List[Any] ): return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: np.ndarray , UpperCamelCase_: Union[float, List[float]] , UpperCamelCase_: Union[float, List[float]] , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_: List[Any] , ): return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: ImageInput , UpperCamelCase_: Optional[bool] = None , UpperCamelCase_: Dict[str, int] = None , UpperCamelCase_: PILImageResampling = None , UpperCamelCase_: bool = None , UpperCamelCase_: int = None , UpperCamelCase_: Optional[bool] = None , UpperCamelCase_: Optional[float] = None , UpperCamelCase_: Optional[bool] = None , UpperCamelCase_: Optional[Union[float, List[float]]] = None , UpperCamelCase_: Optional[Union[float, List[float]]] = None , UpperCamelCase_: Optional[Union[str, TensorType]] = None , UpperCamelCase_: Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase_: Tuple , ): __lowerCamelCase = do_resize if do_resize is not None else self.do_resize __lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale __lowerCamelCase = do_normalize if do_normalize is not None else self.do_normalize __lowerCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop __lowerCamelCase = crop_size if crop_size is not None else self.crop_size __lowerCamelCase = get_size_dict(UpperCamelCase_ , param_name="""crop_size""" , default_to_square=UpperCamelCase_ ) __lowerCamelCase = resample if resample is not None else self.resample __lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor __lowerCamelCase = image_mean if image_mean is not None else self.image_mean __lowerCamelCase = image_std if image_std is not None else self.image_std __lowerCamelCase = size if size is not None else self.size __lowerCamelCase = get_size_dict(UpperCamelCase_ ) if not is_batched(UpperCamelCase_ ): __lowerCamelCase = [images] if not valid_images(UpperCamelCase_ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) # All transformations expect numpy arrays. 
__lowerCamelCase = [to_numpy_array(UpperCamelCase_ ) for image in images] if do_resize: __lowerCamelCase = [self.resize(image=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ ) for image in images] if do_center_crop: __lowerCamelCase = [self.center_crop(image=UpperCamelCase_ , size=UpperCamelCase_ ) for image in images] if do_rescale: __lowerCamelCase = [self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ ) for image in images] if do_normalize: __lowerCamelCase = [self.normalize(image=UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ ) for image in images] __lowerCamelCase = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images] __lowerCamelCase = {"""pixel_values""": images} return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
12
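The image processor above applies the usual resize → center-crop → rescale → normalize chain and packs the result into a `BatchFeature` under `pixel_values`. A short sketch of how such a processor is driven; `SomeImageProcessor` stands in for the obfuscated class name and the sizes are illustrative:

# Hedged sketch of the preprocessing chain above; SomeImageProcessor is a stand-in name.
import numpy as np
from PIL import Image

processor = SomeImageProcessor(
    do_resize=True,
    size={"height": 224, "width": 224},
    do_center_crop=True,
    crop_size={"height": 224, "width": 224},
    do_rescale=True,
    rescale_factor=1 / 255,
    do_normalize=True,
)

image = Image.fromarray(np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8))
# In the upstream BaseImageProcessor, __call__ dispatches to the preprocess method shown above.
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # expected (1, 3, 224, 224): batched, channels-first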
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) UpperCAmelCase_ = { 'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig'] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = ['VisionEncoderDecoderModel'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = ['TFVisionEncoderDecoderModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = ['FlaxVisionEncoderDecoderModel'] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
12
1
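This `__init__` only registers an import table with `_LazyModule`, so the torch/TF/Flax implementations are imported on first attribute access rather than at package import time. A minimal illustration of what that enables; the encoder/decoder checkpoints are illustrative and torch must be installed:

# Hedged sketch: these names resolve through the lazy import table registered above.
from transformers import VisionEncoderDecoderConfig, VisionEncoderDecoderModel

model = VisionEncoderDecoderModel.from_encoder_decoder_pretrained(
    "google/vit-base-patch16-224-in21k",  # encoder checkpoint (illustrative)
    "bert-base-uncased",                  # decoder checkpoint (illustrative)
)
print(isinstance(model.config, VisionEncoderDecoderConfig))  # True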
import json import os import tempfile import transformers import datasets from utils import generate_example_dataset, get_duration UpperCAmelCase_ = 500_000 UpperCAmelCase_ , UpperCAmelCase_ = os.path.split(__file__) UpperCAmelCase_ = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json')) @get_duration def lowerCamelCase__ ( A__ : datasets.Dataset , **A__ : Optional[Any] ): '''simple docstring''' __lowerCamelCase = dataset.map(**A__ ) @get_duration def lowerCamelCase__ ( A__ : datasets.Dataset , **A__ : Optional[Any] ): '''simple docstring''' __lowerCamelCase = dataset.filter(**A__ ) def lowerCamelCase__ ( ): '''simple docstring''' __lowerCamelCase = {"""num examples""": SPEED_TEST_N_EXAMPLES} with tempfile.TemporaryDirectory() as tmp_dir: __lowerCamelCase = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} ) __lowerCamelCase = generate_example_dataset( os.path.join(A__ , """dataset.arrow""" ) , A__ , num_examples=A__ ) __lowerCamelCase = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=A__ ) def tokenize(A__ : str ): return tokenizer(examples["""text"""] ) __lowerCamelCase = map(A__ ) __lowerCamelCase = map(A__ , batched=A__ ) __lowerCamelCase = map(A__ , function=lambda A__ : None , batched=A__ ) with dataset.formatted_as(type="""numpy""" ): __lowerCamelCase = map(A__ , function=lambda A__ : None , batched=A__ ) with dataset.formatted_as(type="""pandas""" ): __lowerCamelCase = map(A__ , function=lambda A__ : None , batched=A__ ) with dataset.formatted_as(type="""torch""" , columns="""numbers""" ): __lowerCamelCase = map(A__ , function=lambda A__ : None , batched=A__ ) with dataset.formatted_as(type="""tensorflow""" , columns="""numbers""" ): __lowerCamelCase = map(A__ , function=lambda A__ : None , batched=A__ ) __lowerCamelCase = map(A__ , function=A__ , batched=A__ ) __lowerCamelCase = filter(A__ ) # Activate later when tokenizer support batched inputs # with dataset.formatted_as(type='numpy'): # times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True) with open(A__ , """wb""" ) as f: f.write(json.dumps(A__ ).encode("""utf-8""" ) ) if __name__ == "__main__": # useful to run the profiler benchmark_map_filter()
12
import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version('>=', FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType UpperCAmelCase_ = get_logger(__name__) def lowerCamelCase__ ( A__ : Union[str, Any] , A__ : str , A__ : Any , A__ : Dict , A__ : Any=0 ): '''simple docstring''' os.makedirs(A__ , exist_ok=A__ ) with FSDP.state_dict_type( A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): __lowerCamelCase = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: __lowerCamelCase = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin' __lowerCamelCase = os.path.join(A__ , A__ ) if accelerator.process_index == 0: logger.info(f'Saving model to {output_model_file}' ) torch.save(A__ , A__ ) logger.info(f'Model saved to {output_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: __lowerCamelCase = ( f'{MODEL_NAME}_rank{accelerator.process_index}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin' ) __lowerCamelCase = os.path.join(A__ , A__ ) logger.info(f'Saving model to {output_model_file}' ) torch.save(A__ , A__ ) logger.info(f'Model saved to {output_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: __lowerCamelCase = os.path.join(A__ , f'{MODEL_NAME}_{model_index}' ) os.makedirs(A__ , exist_ok=A__ ) logger.info(f'Saving model to {ckpt_dir}' ) __lowerCamelCase = {"""model""": state_dict} dist_cp.save_state_dict( state_dict=A__ , storage_writer=dist_cp.FileSystemWriter(A__ ) , planner=DefaultSavePlanner() , ) logger.info(f'Model saved to {ckpt_dir}' ) def lowerCamelCase__ ( A__ : int , A__ : Dict , A__ : int , A__ : List[str] , A__ : Any=0 ): '''simple docstring''' accelerator.wait_for_everyone() with FSDP.state_dict_type( A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(A__ ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( """Set the `sync_module_states` flag to `True` so that model states are synced across processes when """ """initializing FSDP object""" ) return __lowerCamelCase = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin' __lowerCamelCase = os.path.join(A__ , A__ ) logger.info(f'Loading model from {input_model_file}' ) __lowerCamelCase = torch.load(A__ ) logger.info(f'Model loaded from {input_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: __lowerCamelCase = ( f'{MODEL_NAME}_rank{accelerator.process_index}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin' ) __lowerCamelCase = os.path.join(A__ , A__ ) logger.info(f'Loading model from {input_model_file}' ) __lowerCamelCase = torch.load(A__ ) logger.info(f'Model loaded from {input_model_file}' ) elif fsdp_plugin.state_dict_type == 
StateDictType.SHARDED_STATE_DICT: __lowerCamelCase = ( os.path.join(A__ , f'{MODEL_NAME}_{model_index}' ) if f'{MODEL_NAME}' not in input_dir else input_dir ) logger.info(f'Loading model from {ckpt_dir}' ) __lowerCamelCase = {"""model""": model.state_dict()} dist_cp.load_state_dict( state_dict=A__ , storage_reader=dist_cp.FileSystemReader(A__ ) , planner=DefaultLoadPlanner() , ) __lowerCamelCase = state_dict["""model"""] logger.info(f'Model loaded from {ckpt_dir}' ) model.load_state_dict(A__ ) def lowerCamelCase__ ( A__ : List[str] , A__ : List[str] , A__ : str , A__ : Dict , A__ : Optional[Any] , A__ : Optional[int]=0 ): '''simple docstring''' os.makedirs(A__ , exist_ok=A__ ) with FSDP.state_dict_type( A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): __lowerCamelCase = FSDP.optim_state_dict(A__ , A__ ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: __lowerCamelCase = ( f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin' ) __lowerCamelCase = os.path.join(A__ , A__ ) logger.info(f'Saving Optimizer state to {output_optimizer_file}' ) torch.save(A__ , A__ ) logger.info(f'Optimizer state saved in {output_optimizer_file}' ) else: __lowerCamelCase = os.path.join(A__ , f'{OPTIMIZER_NAME}_{optimizer_index}' ) os.makedirs(A__ , exist_ok=A__ ) logger.info(f'Saving Optimizer state to {ckpt_dir}' ) dist_cp.save_state_dict( state_dict={"""optimizer""": optim_state} , storage_writer=dist_cp.FileSystemWriter(A__ ) , planner=DefaultSavePlanner() , ) logger.info(f'Optimizer state saved in {ckpt_dir}' ) def lowerCamelCase__ ( A__ : int , A__ : List[str] , A__ : int , A__ : Any , A__ : Union[str, Any] , A__ : List[Any]=0 ): '''simple docstring''' accelerator.wait_for_everyone() with FSDP.state_dict_type( A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: __lowerCamelCase = None # below check should work but currently it isn't working (mostly opytorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: __lowerCamelCase = ( f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin' ) __lowerCamelCase = os.path.join(A__ , A__ ) logger.info(f'Loading Optimizer state from {input_optimizer_file}' ) __lowerCamelCase = torch.load(A__ ) logger.info(f'Optimizer state loaded from {input_optimizer_file}' ) else: __lowerCamelCase = ( os.path.join(A__ , f'{OPTIMIZER_NAME}_{optimizer_index}' ) if f'{OPTIMIZER_NAME}' not in input_dir else input_dir ) logger.info(f'Loading Optimizer from {ckpt_dir}' ) __lowerCamelCase = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict() , optimizer_key="""optimizer""" , storage_reader=dist_cp.FileSystemReader(A__ ) , ) __lowerCamelCase = optim_state["""optimizer"""] logger.info(f'Optimizer loaded from {ckpt_dir}' ) __lowerCamelCase = FSDP.optim_state_dict_to_load(A__ , A__ , A__ ) optimizer.load_state_dict(A__ )
12
1
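The save/load helpers above branch on `StateDictType` between one consolidated checkpoint (FULL_STATE_DICT), per-rank shard files (LOCAL_STATE_DICT), and the distributed-checkpoint directory format (SHARDED_STATE_DICT). A stripped-down sketch of the FULL_STATE_DICT mechanism the first branch relies on, using the raw PyTorch FSDP API; it assumes `model` is already FSDP-wrapped and the process group is initialized, and it is not a drop-in replacement for the helpers:

# Hedged sketch of the FULL_STATE_DICT branch: gather unsharded weights and save on rank 0.
import torch
import torch.distributed as dist
from torch.distributed.fsdp import FullStateDictConfig, StateDictType
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP

full_cfg = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT, full_cfg):
    state_dict = model.state_dict()  # consolidated parameters (empty dict on non-zero ranks)

if dist.get_rank() == 0:
    torch.save(state_dict, "pytorch_model.bin")  # mirrors the `{MODEL_NAME}.bin` naming above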
import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs UpperCAmelCase_ = imread(r'digital_image_processing/image_data/lena_small.jpg') UpperCAmelCase_ = cvtColor(img, COLOR_BGR2GRAY) def lowerCamelCase__ ( ): '''simple docstring''' __lowerCamelCase = cn.convert_to_negative(A__ ) # assert negative_img array for at least one True assert negative_img.any() def lowerCamelCase__ ( ): '''simple docstring''' with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img: # Work around assertion for response assert str(cc.change_contrast(A__ , 110 ) ).startswith( """<PIL.Image.Image image mode=RGB size=100x100 at""" ) def lowerCamelCase__ ( ): '''simple docstring''' __lowerCamelCase = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def lowerCamelCase__ ( ): '''simple docstring''' __lowerCamelCase = imread("""digital_image_processing/image_data/lena_small.jpg""" , 0 ) # assert ambiguous array for all == True assert canny_img.all() __lowerCamelCase = canny.canny(A__ ) # assert canny array for at least one True assert canny_array.any() def lowerCamelCase__ ( ): '''simple docstring''' assert gg.gaussian_filter(A__ , 5 , sigma=0.9 ).all() def lowerCamelCase__ ( ): '''simple docstring''' __lowerCamelCase = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] ) __lowerCamelCase = conv.img_convolve(A__ , A__ ).astype(A__ ) assert res.any() def lowerCamelCase__ ( ): '''simple docstring''' assert med.median_filter(A__ , 3 ).any() def lowerCamelCase__ ( ): '''simple docstring''' __lowerCamelCase, __lowerCamelCase = sob.sobel_filter(A__ ) assert grad.any() and theta.any() def lowerCamelCase__ ( ): '''simple docstring''' __lowerCamelCase = sp.make_sepia(A__ , 20 ) assert sepia.all() def lowerCamelCase__ ( A__ : str = "digital_image_processing/image_data/lena_small.jpg" ): '''simple docstring''' __lowerCamelCase = bs.Burkes(imread(A__ , 1 ) , 120 ) burkes.process() assert burkes.output_img.any() def lowerCamelCase__ ( A__ : str = "digital_image_processing/image_data/lena_small.jpg" , ): '''simple docstring''' __lowerCamelCase = rs.NearestNeighbour(imread(A__ , 1 ) , 400 , 200 ) nn.process() assert nn.output.any() def lowerCamelCase__ ( ): '''simple docstring''' __lowerCamelCase = """digital_image_processing/image_data/lena.jpg""" # Reading the image and converting it to grayscale. 
__lowerCamelCase = imread(A__ , 0 ) # Test for get_neighbors_pixel function() return not None __lowerCamelCase = 0 __lowerCamelCase = 0 __lowerCamelCase = image[x_coordinate][y_coordinate] __lowerCamelCase = lbp.get_neighbors_pixel( A__ , A__ , A__ , A__ ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image __lowerCamelCase = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): __lowerCamelCase = lbp.local_binary_value(A__ , A__ , A__ ) assert lbp_image.any()
12
import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class lowerCamelCase__( __lowerCamelCase , unittest.TestCase): UpperCAmelCase__ : Tuple = ShapEImgaImgPipeline UpperCAmelCase__ : Optional[Any] = ['image'] UpperCAmelCase__ : int = ['image'] UpperCAmelCase__ : Any = [ 'num_images_per_prompt', 'num_inference_steps', 'generator', 'latents', 'guidance_scale', 'frame_size', 'output_type', 'return_dict', ] UpperCAmelCase__ : int = False @property def lowerCAmelCase__ ( self: int ): return 32 @property def lowerCAmelCase__ ( self: List[str] ): return 32 @property def lowerCAmelCase__ ( self: Any ): return self.time_input_dim * 4 @property def lowerCAmelCase__ ( self: Dict ): return 8 @property def lowerCAmelCase__ ( self: int ): torch.manual_seed(0 ) __lowerCamelCase = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , ) __lowerCamelCase = CLIPVisionModel(UpperCamelCase_ ) return model @property def lowerCAmelCase__ ( self: Dict ): __lowerCamelCase = CLIPImageProcessor( crop_size=2_24 , do_center_crop=UpperCamelCase_ , do_normalize=UpperCamelCase_ , do_resize=UpperCamelCase_ , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_24 , ) return image_processor @property def lowerCAmelCase__ ( self: Tuple ): torch.manual_seed(0 ) __lowerCamelCase = { """num_attention_heads""": 2, """attention_head_dim""": 16, """embedding_dim""": self.time_input_dim, """num_embeddings""": 32, """embedding_proj_dim""": self.text_embedder_hidden_size, """time_embed_dim""": self.time_embed_dim, """num_layers""": 1, """clip_embed_dim""": self.time_input_dim * 2, """additional_embeddings""": 0, """time_embed_act_fn""": """gelu""", """norm_in_type""": """layer""", """embedding_proj_norm_type""": """layer""", """encoder_hid_proj_type""": None, """added_emb_type""": None, } __lowerCamelCase = PriorTransformer(**UpperCamelCase_ ) return model @property def lowerCAmelCase__ ( self: List[Any] ): torch.manual_seed(0 ) __lowerCamelCase = { """param_shapes""": ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), """d_latent""": self.time_input_dim, """d_hidden""": self.renderer_dim, """n_output""": 12, """background""": ( 0.1, 0.1, 0.1, ), } __lowerCamelCase = ShapERenderer(**UpperCamelCase_ ) return model def lowerCAmelCase__ ( self: List[str] ): __lowerCamelCase = self.dummy_prior __lowerCamelCase = self.dummy_image_encoder __lowerCamelCase = self.dummy_image_processor __lowerCamelCase = self.dummy_renderer __lowerCamelCase = HeunDiscreteScheduler( beta_schedule="""exp""" , num_train_timesteps=10_24 , prediction_type="""sample""" , use_karras_sigmas=UpperCamelCase_ , clip_sample=UpperCamelCase_ , clip_sample_range=1.0 , ) __lowerCamelCase = { """prior""": prior, """image_encoder""": image_encoder, """image_processor""": image_processor, """renderer""": renderer, 
"""scheduler""": scheduler, } return components def lowerCAmelCase__ ( self: int , UpperCamelCase_: List[Any] , UpperCamelCase_: Dict=0 ): __lowerCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ ) if str(UpperCamelCase_ ).startswith("""mps""" ): __lowerCamelCase = torch.manual_seed(UpperCamelCase_ ) else: __lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ ) __lowerCamelCase = { """image""": input_image, """generator""": generator, """num_inference_steps""": 1, """frame_size""": 32, """output_type""": """np""", } return inputs def lowerCAmelCase__ ( self: Tuple ): __lowerCamelCase = """cpu""" __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = self.pipeline_class(**UpperCamelCase_ ) __lowerCamelCase = pipe.to(UpperCamelCase_ ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) __lowerCamelCase = pipe(**self.get_dummy_inputs(UpperCamelCase_ ) ) __lowerCamelCase = output.images[0] __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) __lowerCamelCase = np.array( [ 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def lowerCAmelCase__ ( self: List[str] ): # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def lowerCAmelCase__ ( self: Any ): __lowerCamelCase = torch_device == """cpu""" __lowerCamelCase = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=UpperCamelCase_ , relax_max_difference=UpperCamelCase_ , ) def lowerCAmelCase__ ( self: Any ): __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = self.pipeline_class(**UpperCamelCase_ ) __lowerCamelCase = pipe.to(UpperCamelCase_ ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) __lowerCamelCase = 1 __lowerCamelCase = 2 __lowerCamelCase = self.get_dummy_inputs(UpperCamelCase_ ) for key in inputs.keys(): if key in self.batch_params: __lowerCamelCase = batch_size * [inputs[key]] __lowerCamelCase = pipe(**UpperCamelCase_ , num_images_per_prompt=UpperCamelCase_ )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class lowerCamelCase__( unittest.TestCase): def lowerCAmelCase__ ( self: Union[str, Any] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase__ ( self: Any ): __lowerCamelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/corgi.png""" ) __lowerCamelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/test_shap_e_img2img_out.npy""" ) __lowerCamelCase = ShapEImgaImgPipeline.from_pretrained("""openai/shap-e-img2img""" ) __lowerCamelCase = pipe.to(UpperCamelCase_ ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) __lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 ) __lowerCamelCase = pipe( UpperCamelCase_ , generator=UpperCamelCase_ , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(UpperCamelCase_ , UpperCamelCase_ )
12
1
import json import re from typing import TYPE_CHECKING, List, Optional, Tuple, Union import numpy as np from ...utils import is_tf_available, is_torch_available, logging if TYPE_CHECKING: if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_codegen import CodeGenTokenizer UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} UpperCAmelCase_ = { 'vocab_file': { 'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json', }, 'merges_file': { 'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt', }, 'tokenizer_file': { 'Salesforce/codegen-350M-mono': ( 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json' ), }, } UpperCAmelCase_ = { 'Salesforce/codegen-350M-mono': 2_048, } class lowerCamelCase__( __lowerCamelCase): UpperCAmelCase__ : Union[str, Any] = VOCAB_FILES_NAMES UpperCAmelCase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ : List[Any] = ['input_ids', 'attention_mask'] UpperCAmelCase__ : Dict = CodeGenTokenizer def __init__( self: Tuple , UpperCamelCase_: Optional[Any]=None , UpperCamelCase_: Tuple=None , UpperCamelCase_: Tuple=None , UpperCamelCase_: Optional[int]="<|endoftext|>" , UpperCamelCase_: Union[str, Any]="<|endoftext|>" , UpperCamelCase_: Union[str, Any]="<|endoftext|>" , UpperCamelCase_: Any=False , **UpperCamelCase_: Tuple , ): super().__init__( UpperCamelCase_ , UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , unk_token=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , ) if kwargs.pop("""add_bos_token""" , UpperCamelCase_ ): __lowerCamelCase = kwargs.pop("""name_or_path""" , """""" ) raise ValueError( """Currenty GPT2's fast tokenizer does NOT support adding a BOS token.""" """Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n""" F'`CodeGenTokenizer.from_pretrained(\'{model_id}\')`\nor\n' F'`AutoTokenizer.from_pretrained(\'{model_id}\', use_fast=False)`\n' """This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.""" """ so that the fast tokenizer works correctly.""" ) __lowerCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" , UpperCamelCase_ ) != add_prefix_space: __lowerCamelCase = getattr(UpperCamelCase_ , pre_tok_state.pop("""type""" ) ) __lowerCamelCase = add_prefix_space __lowerCamelCase = pre_tok_class(**UpperCamelCase_ ) __lowerCamelCase = add_prefix_space def lowerCAmelCase__ ( self: Union[str, Any] , *UpperCamelCase_: Union[str, Any] , **UpperCamelCase_: List[str] ): __lowerCamelCase = kwargs.get("""is_split_into_words""" , UpperCamelCase_ ) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." 
) return super()._batch_encode_plus(*UpperCamelCase_ , **UpperCamelCase_ ) def lowerCAmelCase__ ( self: Union[str, Any] , *UpperCamelCase_: Any , **UpperCamelCase_: List[str] ): __lowerCamelCase = kwargs.get("""is_split_into_words""" , UpperCamelCase_ ) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._encode_plus(*UpperCamelCase_ , **UpperCamelCase_ ) def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: str , UpperCamelCase_: Optional[str] = None ): __lowerCamelCase = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ ) return tuple(UpperCamelCase_ ) def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"] , UpperCamelCase_: bool = False , UpperCamelCase_: bool = None , UpperCamelCase_: Optional[List[str]] = None , **UpperCamelCase_: Union[str, Any] , ): __lowerCamelCase = super().decode( token_ids=UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ , **UpperCamelCase_ , ) if truncate_before_pattern is not None and len(UpperCamelCase_ ) > 0: __lowerCamelCase = self.truncate(UpperCamelCase_ , UpperCamelCase_ ) return decoded_text def lowerCAmelCase__ ( self: int , UpperCamelCase_: Optional[int] , UpperCamelCase_: Any ): def find_re(UpperCamelCase_: int , UpperCamelCase_: Optional[int] , UpperCamelCase_: List[Any] ): __lowerCamelCase = pattern.search(UpperCamelCase_ , UpperCamelCase_ ) return m.start() if m else -1 __lowerCamelCase = [re.compile(UpperCamelCase_ , re.MULTILINE ) for pattern in truncate_before_pattern] __lowerCamelCase = list(re.finditer("""^print""" , UpperCamelCase_ , re.MULTILINE ) ) if len(UpperCamelCase_ ) > 1: __lowerCamelCase = completion[: prints[1].start()] __lowerCamelCase = list(re.finditer("""^def""" , UpperCamelCase_ , re.MULTILINE ) ) if len(UpperCamelCase_ ) > 1: __lowerCamelCase = completion[: defs[1].start()] __lowerCamelCase = 0 __lowerCamelCase = [ pos for pos in [find_re(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) for terminal in terminals] if pos != -1 ] if len(UpperCamelCase_ ) > 0: return completion[: min(UpperCamelCase_ )] else: return completion
12
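Besides the usual fast-tokenizer plumbing, the class above adds a `truncate_before_pattern` argument to `decode`, cutting the decoded string at the earliest match of any supplied regex (compiled with `re.MULTILINE`) and also trimming after a second top-level `print` or `def`. A short usage sketch, assuming the public `Salesforce/codegen-350M-mono` checkpoint referenced in the file:

# Hedged sketch of decode(..., truncate_before_pattern=...) as implemented above.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

generated = (
    "def add(a, b):\n"
    "    return a + b\n"
    "\n"
    "print(add(1, 2))\n"
    "\n\n\n"
    "def unrelated_function_the_model_kept_generating():\n"
    "    pass\n"
)
ids = tokenizer(generated).input_ids

# Truncate at the first run of blank lines (or any other supplied pattern).
completion = tokenizer.decode(ids, truncate_before_pattern=[r"\n\n\n"])
print(completion)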
from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) UpperCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name UpperCAmelCase_ = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... 
).images\n\n >>> images[0].save("robot_cat.png")\n ```\n' def lowerCamelCase__ ( A__ : Optional[int] , A__ : Dict , A__ : Optional[int]=8 ): '''simple docstring''' __lowerCamelCase = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 __lowerCamelCase = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class lowerCamelCase__( __lowerCamelCase): def __init__( self: List[Any] , UpperCamelCase_: UNetaDConditionModel , UpperCamelCase_: DDPMScheduler , UpperCamelCase_: VQModel , ): super().__init__() self.register_modules( unet=UpperCamelCase_ , scheduler=UpperCamelCase_ , movq=UpperCamelCase_ , ) __lowerCamelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1) def lowerCAmelCase__ ( self: int , UpperCamelCase_: Any , UpperCamelCase_: Tuple , UpperCamelCase_: int , UpperCamelCase_: Dict , UpperCamelCase_: Dict , UpperCamelCase_: int ): if latents is None: __lowerCamelCase = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_ ) else: if latents.shape != shape: raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' ) __lowerCamelCase = latents.to(UpperCamelCase_ ) __lowerCamelCase = latents * scheduler.init_noise_sigma return latents def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: str=0 ): if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) __lowerCamelCase = torch.device(F'cuda:{gpu_id}' ) __lowerCamelCase = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(UpperCamelCase_ , UpperCamelCase_ ) def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Optional[int]=0 ): if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ): from accelerate import cpu_offload_with_hook else: raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" ) __lowerCamelCase = torch.device(F'cuda:{gpu_id}' ) if self.device.type != "cpu": self.to("""cpu""" , silence_dtype_warnings=UpperCamelCase_ ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) __lowerCamelCase = None for cpu_offloaded_model in [self.unet, self.movq]: __lowerCamelCase, __lowerCamelCase = cpu_offload_with_hook(UpperCamelCase_ , UpperCamelCase_ , prev_module_hook=UpperCamelCase_ ) # We'll offload the last model manually. 
__lowerCamelCase = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def lowerCAmelCase__ ( self: int ): if not hasattr(self.unet , """_hf_hook""" ): return self.device for module in self.unet.modules(): if ( hasattr(UpperCamelCase_ , """_hf_hook""" ) and hasattr(module._hf_hook , """execution_device""" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(UpperCamelCase_ ) def __call__( self: Tuple , UpperCamelCase_: Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCamelCase_: Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: int = 5_12 , UpperCamelCase_: int = 5_12 , UpperCamelCase_: int = 1_00 , UpperCamelCase_: float = 4.0 , UpperCamelCase_: int = 1 , UpperCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_: Optional[torch.FloatTensor] = None , UpperCamelCase_: Optional[str] = "pil" , UpperCamelCase_: bool = True , ): __lowerCamelCase = self._execution_device __lowerCamelCase = guidance_scale > 1.0 if isinstance(UpperCamelCase_ , UpperCamelCase_ ): __lowerCamelCase = torch.cat(UpperCamelCase_ , dim=0 ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ): __lowerCamelCase = torch.cat(UpperCamelCase_ , dim=0 ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ): __lowerCamelCase = torch.cat(UpperCamelCase_ , dim=0 ) __lowerCamelCase = image_embeds.shape[0] * num_images_per_prompt if do_classifier_free_guidance: __lowerCamelCase = image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 ) __lowerCamelCase = negative_image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 ) __lowerCamelCase = hint.repeat_interleave(UpperCamelCase_ , dim=0 ) __lowerCamelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCamelCase_ ) __lowerCamelCase = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCamelCase_ ) self.scheduler.set_timesteps(UpperCamelCase_ , device=UpperCamelCase_ ) __lowerCamelCase = self.scheduler.timesteps __lowerCamelCase = self.movq.config.latent_channels __lowerCamelCase, __lowerCamelCase = downscale_height_and_width(UpperCamelCase_ , UpperCamelCase_ , self.movq_scale_factor ) # create initial latent __lowerCamelCase = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.scheduler , ) for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ): # expand the latents if we are doing classifier free guidance __lowerCamelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents __lowerCamelCase = {"""image_embeds""": image_embeds, """hint""": hint} __lowerCamelCase = self.unet( sample=UpperCamelCase_ , timestep=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , added_cond_kwargs=UpperCamelCase_ , return_dict=UpperCamelCase_ , )[0] if do_classifier_free_guidance: __lowerCamelCase, __lowerCamelCase = noise_pred.split(latents.shape[1] , dim=1 ) __lowerCamelCase, __lowerCamelCase = noise_pred.chunk(2 ) __lowerCamelCase, __lowerCamelCase = variance_pred.chunk(2 ) __lowerCamelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) __lowerCamelCase = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , """variance_type""" ) and 
self.scheduler.config.variance_type in ["learned", "learned_range"] ): __lowerCamelCase, __lowerCamelCase = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 __lowerCamelCase = self.scheduler.step( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ , )[0] # post-processing __lowerCamelCase = self.movq.decode(UpperCamelCase_ , force_not_quantize=UpperCamelCase_ )["""sample"""] if output_type not in ["pt", "np", "pil"]: raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' ) if output_type in ["np", "pil"]: __lowerCamelCase = image * 0.5 + 0.5 __lowerCamelCase = image.clamp(0 , 1 ) __lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": __lowerCamelCase = self.numpy_to_pil(UpperCamelCase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCamelCase_ )
12
1
from math import factorial


def lowerCamelCase__ ( A__ : int , A__ : int , A__ : float ):
    '''simple docstring'''
    if successes > trials:
        raise ValueError("""successes must be lower or equal to trials""" )
    if trials < 0 or successes < 0:
        raise ValueError("""the function is defined for non-negative integers""" )
    if not isinstance(A__ , A__ ) or not isinstance(A__ , A__ ):
        raise ValueError("""the function is defined for non-negative integers""" )
    if not 0 < prob < 1:
        raise ValueError("""prob has to be in range of 1 - 0""" )
    __lowerCamelCase = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    __lowerCamelCase = float(factorial(A__ ) )
    coefficient /= factorial(A__ ) * factorial(trials - successes )
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print('Probability of 2 successes out of 4 trials')
    print('with probability of 0.75 is:', end=' ')
    print(binomial_distribution(2, 4, 0.75))
12
import unittest from transformers import BigBirdConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax from transformers.models.big_bird.modeling_flax_big_bird import ( FlaxBigBirdForCausalLM, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForPreTraining, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, FlaxBigBirdModel, ) class lowerCamelCase__( unittest.TestCase): def __init__( self: Optional[int] , UpperCamelCase_: Any , UpperCamelCase_: int=2 , UpperCamelCase_: Optional[Any]=56 , UpperCamelCase_: Tuple=True , UpperCamelCase_: Union[str, Any]=True , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: str=True , UpperCamelCase_: str=99 , UpperCamelCase_: Tuple=32 , UpperCamelCase_: int=2 , UpperCamelCase_: Optional[int]=2 , UpperCamelCase_: Tuple=7 , UpperCamelCase_: Optional[int]="gelu_new" , UpperCamelCase_: Any=0.1 , UpperCamelCase_: List[str]=0.1 , UpperCamelCase_: List[Any]=5_12 , UpperCamelCase_: Union[str, Any]=16 , UpperCamelCase_: int=2 , UpperCamelCase_: Dict=0.02 , UpperCamelCase_: Tuple=4 , UpperCamelCase_: Union[str, Any]="block_sparse" , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Any=False , UpperCamelCase_: Any=2 , UpperCamelCase_: int=3 , ): __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = seq_length __lowerCamelCase = is_training __lowerCamelCase = use_attention_mask __lowerCamelCase = use_token_type_ids __lowerCamelCase = use_labels __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = type_sequence_label_size __lowerCamelCase = initializer_range __lowerCamelCase = num_choices __lowerCamelCase = rescale_embeddings __lowerCamelCase = attention_type __lowerCamelCase = use_bias __lowerCamelCase = block_size __lowerCamelCase = num_random_blocks def lowerCAmelCase__ ( self: int ): __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCamelCase = None if self.use_attention_mask: __lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCamelCase = None if self.use_token_type_ids: __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCamelCase = BigBirdConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , ) return config, input_ids, token_type_ids, attention_mask def lowerCAmelCase__ ( self: Dict ): __lowerCamelCase = self.prepare_config_and_inputs() 
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = config_and_inputs __lowerCamelCase = { """input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask, } return config, inputs_dict @require_flax class lowerCamelCase__( __lowerCamelCase , unittest.TestCase): UpperCAmelCase__ : Optional[int] = ( ( FlaxBigBirdForCausalLM, FlaxBigBirdModel, FlaxBigBirdForPreTraining, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, ) if is_flax_available() else () ) UpperCAmelCase__ : Optional[int] = False UpperCAmelCase__ : Optional[int] = False def lowerCAmelCase__ ( self: List[str] ): __lowerCamelCase = FlaxBigBirdModelTester(self ) @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def lowerCAmelCase__ ( self: Optional[Any] ): super().test_from_pretrained_save_pretrained() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def lowerCAmelCase__ ( self: List[Any] ): super().test_from_pretrained_with_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def lowerCAmelCase__ ( self: List[Any] ): super().test_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def lowerCAmelCase__ ( self: List[str] ): super().test_hidden_states_output() @slow def lowerCAmelCase__ ( self: Optional[Any] ): for model_class_name in self.all_model_classes: __lowerCamelCase = model_class_name.from_pretrained("""google/bigbird-roberta-base""" ) self.assertIsNotNone(UpperCamelCase_ ) def lowerCAmelCase__ ( self: Dict ): if self.test_attn_probs: super().test_attention_outputs() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def lowerCAmelCase__ ( self: List[Any] ): __lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __lowerCamelCase = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) __lowerCamelCase = model_class(UpperCamelCase_ ) @jax.jit def model_jitted(UpperCamelCase_: Tuple , UpperCamelCase_: List[Any]=None , **UpperCamelCase_: Union[str, Any] ): return model(input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ , **UpperCamelCase_ ) with self.subTest("""JIT Enabled""" ): __lowerCamelCase = model_jitted(**UpperCamelCase_ ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __lowerCamelCase = model_jitted(**UpperCamelCase_ ).to_tuple() self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) ) for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_ ): self.assertEqual(jitted_output.shape , output.shape ) def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: int , UpperCamelCase_: List[str] , UpperCamelCase_: Any , UpperCamelCase_: Dict=1E-5 , UpperCamelCase_: List[str]="outputs" , UpperCamelCase_: List[str]=None ): # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version, # an effort was done to return `attention_probs` (yet to be verified). if name.startswith("""outputs.attentions""" ): return else: super().check_pt_flax_outputs(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
12
1
import logging import torch from accelerate import Accelerator from arguments import EvaluationArguments from datasets import load_dataset from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed class lowerCamelCase__( __lowerCamelCase): def __init__( self: str , UpperCamelCase_: int , UpperCamelCase_: List[Any] , UpperCamelCase_: str=10_24 , UpperCamelCase_: int=10_24 , UpperCamelCase_: Any=3.6 ): __lowerCamelCase = tokenizer __lowerCamelCase = tokenizer.bos_token_id __lowerCamelCase = dataset __lowerCamelCase = seq_length __lowerCamelCase = seq_length * chars_per_token * num_of_sequences def __iter__( self: Optional[int] ): __lowerCamelCase = iter(self.dataset ) __lowerCamelCase = True while more_examples: __lowerCamelCase, __lowerCamelCase = [], 0 while True: if buffer_len >= self.input_characters: break try: buffer.append(next(UpperCamelCase_ )["""content"""] ) buffer_len += len(buffer[-1] ) except StopIteration: __lowerCamelCase = False break __lowerCamelCase = tokenizer(UpperCamelCase_ , truncation=UpperCamelCase_ )["""input_ids"""] __lowerCamelCase = [] for tokenized_input in tokenized_inputs: all_token_ids.extend(tokenized_input + [self.concat_token_id] ) for i in range(0 , len(UpperCamelCase_ ) , self.seq_length ): __lowerCamelCase = all_token_ids[i : i + self.seq_length] if len(UpperCamelCase_ ) == self.seq_length: yield torch.tensor(UpperCamelCase_ ) def lowerCamelCase__ ( A__ : Tuple ): '''simple docstring''' __lowerCamelCase = {"""streaming""": True} __lowerCamelCase = load_dataset(args.dataset_name , split="""train""" , **A__ ) __lowerCamelCase = ConstantLengthDataset(A__ , A__ , seq_length=args.seq_length ) __lowerCamelCase = DataLoader(A__ , batch_size=args.batch_size ) return eval_dataloader def lowerCamelCase__ ( A__ : List[str] ): '''simple docstring''' model.eval() __lowerCamelCase = [] for step, batch in enumerate(A__ ): with torch.no_grad(): __lowerCamelCase = model(A__ , labels=A__ ) __lowerCamelCase = outputs.loss.repeat(args.batch_size ) losses.append(accelerator.gather(A__ ) ) if args.max_eval_steps > 0 and step >= args.max_eval_steps: break __lowerCamelCase = torch.mean(torch.cat(A__ ) ) try: __lowerCamelCase = torch.exp(A__ ) except OverflowError: __lowerCamelCase = float("""inf""" ) return loss.item(), perplexity.item() # Setup Accelerator UpperCAmelCase_ = Accelerator() # Parse configuration UpperCAmelCase_ = HfArgumentParser(EvaluationArguments) UpperCAmelCase_ = parser.parse_args() set_seed(args.seed) # Logging UpperCAmelCase_ = logging.getLogger(__name__) logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO ) # Load model and tokenizer UpperCAmelCase_ = AutoModelForCausalLM.from_pretrained(args.model_ckpt) UpperCAmelCase_ = AutoTokenizer.from_pretrained(args.model_ckpt) # Load dataset and dataloader UpperCAmelCase_ = create_dataloader(args) # Prepare everything with our `accelerator`. UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(model, eval_dataloader) # Evaluate and save the last checkpoint logger.info('Evaluating and saving model after training') UpperCAmelCase_ , UpperCAmelCase_ = evaluate(args) logger.info(f"""loss/eval: {eval_loss}, perplexity: {perplexity}""")
12
def lowerCamelCase__ ( A__ : list ):
    '''simple docstring'''
    __lowerCamelCase = len(A__ )
    for _ in range(A__ ):
        for i in range(_ % 2 , arr_size - 1 , 2 ):
            if arr[i + 1] < arr[i]:
                __lowerCamelCase, __lowerCamelCase = arr[i + 1], arr[i]

    return arr


if __name__ == "__main__":
    UpperCAmelCase_ = list(range(10, 0, -1))
    print(f"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
import argparse

import torch

from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging


logging.set_verbosity_info()


def lowerCamelCase__ ( A__ : str , A__ : List[str] , A__ : Tuple ):
    '''simple docstring'''
    __lowerCamelCase = RemBertConfig.from_json_file(A__ )
    print("""Building PyTorch model from configuration: {}""".format(str(A__ ) ) )
    __lowerCamelCase = RemBertModel(A__ )

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(A__ , A__ , A__ )

    # Save pytorch-model
    print("""Save PyTorch model to {}""".format(A__ ) )
    torch.save(model.state_dict() , A__ )


if __name__ == "__main__":
    UpperCAmelCase_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--rembert_config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained RemBERT model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    UpperCAmelCase_ = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
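# Illustrative command line for the conversion script above; the three flags are exactly
# the ones declared by the argparse parser, and the paths are placeholders.
#
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/rembert/model.ckpt \
#       --rembert_config_file /path/to/rembert_config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin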
import os import warnings from typing import List, Optional from ...tokenization_utils_base import BatchEncoding from ...utils import logging from .configuration_rag import RagConfig UpperCAmelCase_ = logging.get_logger(__name__) class lowerCamelCase__: def __init__( self: Any , UpperCamelCase_: str , UpperCamelCase_: Dict ): __lowerCamelCase = question_encoder __lowerCamelCase = generator __lowerCamelCase = self.question_encoder def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Optional[Any] ): if os.path.isfile(UpperCamelCase_ ): raise ValueError(F'Provided path ({save_directory}) should be a directory, not a file' ) os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ ) __lowerCamelCase = os.path.join(UpperCamelCase_ , """question_encoder_tokenizer""" ) __lowerCamelCase = os.path.join(UpperCamelCase_ , """generator_tokenizer""" ) self.question_encoder.save_pretrained(UpperCamelCase_ ) self.generator.save_pretrained(UpperCamelCase_ ) @classmethod def lowerCAmelCase__ ( cls: List[Any] , UpperCamelCase_: Dict , **UpperCamelCase_: Union[str, Any] ): # dynamically import AutoTokenizer from ..auto.tokenization_auto import AutoTokenizer __lowerCamelCase = kwargs.pop("""config""" , UpperCamelCase_ ) if config is None: __lowerCamelCase = RagConfig.from_pretrained(UpperCamelCase_ ) __lowerCamelCase = AutoTokenizer.from_pretrained( UpperCamelCase_ , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" ) __lowerCamelCase = AutoTokenizer.from_pretrained( UpperCamelCase_ , config=config.generator , subfolder="""generator_tokenizer""" ) return cls(question_encoder=UpperCamelCase_ , generator=UpperCamelCase_ ) def __call__( self: Tuple , *UpperCamelCase_: int , **UpperCamelCase_: int ): return self.current_tokenizer(*UpperCamelCase_ , **UpperCamelCase_ ) def lowerCAmelCase__ ( self: Tuple , *UpperCamelCase_: List[Any] , **UpperCamelCase_: List[Any] ): return self.generator.batch_decode(*UpperCamelCase_ , **UpperCamelCase_ ) def lowerCAmelCase__ ( self: Optional[Any] , *UpperCamelCase_: str , **UpperCamelCase_: Union[str, Any] ): return self.generator.decode(*UpperCamelCase_ , **UpperCamelCase_ ) def lowerCAmelCase__ ( self: str ): __lowerCamelCase = self.question_encoder def lowerCAmelCase__ ( self: Optional[int] ): __lowerCamelCase = self.generator def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: List[str] , UpperCamelCase_: Optional[List[str]] = None , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: str = "longest" , UpperCamelCase_: str = None , UpperCamelCase_: bool = True , **UpperCamelCase_: int , ): warnings.warn( """`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """ """regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """ """context manager to prepare your targets. 
See the documentation of your specific tokenizer for more """ """details""" , UpperCamelCase_ , ) if max_length is None: __lowerCamelCase = self.current_tokenizer.model_max_length __lowerCamelCase = self( UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_tensors=UpperCamelCase_ , max_length=UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , **UpperCamelCase_ , ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: __lowerCamelCase = self.current_tokenizer.model_max_length __lowerCamelCase = self( text_target=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_tensors=UpperCamelCase_ , padding=UpperCamelCase_ , max_length=UpperCamelCase_ , truncation=UpperCamelCase_ , **UpperCamelCase_ , ) __lowerCamelCase = labels["""input_ids"""] return model_inputs
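# Minimal usage sketch for the composite tokenizer above. It is a sketch, not part of this
# file: the class is published as RagTokenizer in transformers, and the checkpoint name
# below is only illustrative.
from transformers import RagTokenizer

rag_tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
model_inputs = rag_tokenizer("who holds the record in 100m freestyle", return_tensors="pt")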
from typing import Callable, List, Optional, Tuple, Union import torch from transformers import CLIPTextModel, CLIPTokenizer from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin, TransformeraDModel, VQModel from ...schedulers import VQDiffusionScheduler from ...utils import logging from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput UpperCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase): @register_to_config def __init__( self: Optional[Any] , UpperCamelCase_: bool , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[int] = None ): super().__init__() __lowerCamelCase = learnable if self.learnable: assert hidden_size is not None, "learnable=True requires `hidden_size` to be set" assert length is not None, "learnable=True requires `length` to be set" __lowerCamelCase = torch.zeros(UpperCamelCase_ , UpperCamelCase_ ) else: __lowerCamelCase = None __lowerCamelCase = torch.nn.Parameter(UpperCamelCase_ ) class lowerCamelCase__( __lowerCamelCase): UpperCAmelCase__ : VQModel UpperCAmelCase__ : CLIPTextModel UpperCAmelCase__ : CLIPTokenizer UpperCAmelCase__ : TransformeraDModel UpperCAmelCase__ : LearnedClassifierFreeSamplingEmbeddings UpperCAmelCase__ : VQDiffusionScheduler def __init__( self: str , UpperCamelCase_: VQModel , UpperCamelCase_: CLIPTextModel , UpperCamelCase_: CLIPTokenizer , UpperCamelCase_: TransformeraDModel , UpperCamelCase_: VQDiffusionScheduler , UpperCamelCase_: LearnedClassifierFreeSamplingEmbeddings , ): super().__init__() self.register_modules( vqvae=UpperCamelCase_ , transformer=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , scheduler=UpperCamelCase_ , learned_classifier_free_sampling_embeddings=UpperCamelCase_ , ) def lowerCAmelCase__ ( self: str , UpperCamelCase_: Any , UpperCamelCase_: List[Any] , UpperCamelCase_: Any ): __lowerCamelCase = len(UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else 1 # get prompt text embeddings __lowerCamelCase = self.tokenizer( UpperCamelCase_ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , ) __lowerCamelCase = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: __lowerCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( """The following part of your input was truncated because CLIP can only handle sequences up to""" F' {self.tokenizer.model_max_length} tokens: {removed_text}' ) __lowerCamelCase = text_input_ids[:, : self.tokenizer.model_max_length] __lowerCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0] # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion. # While CLIP does normalize the pooled output of the text transformer when combining # the image and text embeddings, CLIP does not directly normalize the last hidden state. # # CLIP normalizing the pooled output. 
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053 __lowerCamelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=UpperCamelCase_ ) # duplicate text embeddings for each generation per prompt __lowerCamelCase = prompt_embeds.repeat_interleave(UpperCamelCase_ , dim=0 ) if do_classifier_free_guidance: if self.learned_classifier_free_sampling_embeddings.learnable: __lowerCamelCase = self.learned_classifier_free_sampling_embeddings.embeddings __lowerCamelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(UpperCamelCase_ , 1 , 1 ) else: __lowerCamelCase = [""""""] * batch_size __lowerCamelCase = text_input_ids.shape[-1] __lowerCamelCase = self.tokenizer( UpperCamelCase_ , padding="""max_length""" , max_length=UpperCamelCase_ , truncation=UpperCamelCase_ , return_tensors="""pt""" , ) __lowerCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # See comment for normalizing text embeddings __lowerCamelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=UpperCamelCase_ ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method __lowerCamelCase = negative_prompt_embeds.shape[1] __lowerCamelCase = negative_prompt_embeds.repeat(1 , UpperCamelCase_ , 1 ) __lowerCamelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , UpperCamelCase_ , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes __lowerCamelCase = torch.cat([negative_prompt_embeds, prompt_embeds] ) return prompt_embeds @torch.no_grad() def __call__( self: Tuple , UpperCamelCase_: Union[str, List[str]] , UpperCamelCase_: int = 1_00 , UpperCamelCase_: float = 5.0 , UpperCamelCase_: float = 1.0 , UpperCamelCase_: int = 1 , UpperCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_: Optional[torch.FloatTensor] = None , UpperCamelCase_: Optional[str] = "pil" , UpperCamelCase_: bool = True , UpperCamelCase_: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCamelCase_: int = 1 , ): if isinstance(UpperCamelCase_ , UpperCamelCase_ ): __lowerCamelCase = 1 elif isinstance(UpperCamelCase_ , UpperCamelCase_ ): __lowerCamelCase = len(UpperCamelCase_ ) else: raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(UpperCamelCase_ )}' ) __lowerCamelCase = batch_size * num_images_per_prompt __lowerCamelCase = guidance_scale > 1.0 __lowerCamelCase = self._encode_prompt(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or callback_steps <= 0) ): raise ValueError( F'`callback_steps` has to be a positive integer but is {callback_steps} of type' F' {type(UpperCamelCase_ )}.' ) # get the initial completely masked latents unless the user supplied it __lowerCamelCase = (batch_size, self.transformer.num_latent_pixels) if latents is None: __lowerCamelCase = self.transformer.num_vector_embeds - 1 __lowerCamelCase = torch.full(UpperCamelCase_ , UpperCamelCase_ ).to(self.device ) else: if latents.shape != latents_shape: raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' ) if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any(): raise ValueError( """Unexpected latents value(s). 
All latents be valid embedding indices i.e. in the range 0,""" F' {self.transformer.num_vector_embeds - 1} (inclusive).' ) __lowerCamelCase = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(UpperCamelCase_ , device=self.device ) __lowerCamelCase = self.scheduler.timesteps.to(self.device ) __lowerCamelCase = latents for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ): # expand the sample if we are doing classifier free guidance __lowerCamelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample # predict the un-noised image # model_output == `log_p_x_0` __lowerCamelCase = self.transformer(UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , timestep=UpperCamelCase_ ).sample if do_classifier_free_guidance: __lowerCamelCase, __lowerCamelCase = model_output.chunk(2 ) __lowerCamelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond) model_output -= torch.logsumexp(UpperCamelCase_ , dim=1 , keepdim=UpperCamelCase_ ) __lowerCamelCase = self.truncate(UpperCamelCase_ , UpperCamelCase_ ) # remove `log(0)`'s (`-inf`s) __lowerCamelCase = model_output.clamp(-70 ) # compute the previous noisy sample x_t -> x_t-1 __lowerCamelCase = self.scheduler.step(UpperCamelCase_ , timestep=UpperCamelCase_ , sample=UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) __lowerCamelCase = self.vqvae.config.vq_embed_dim __lowerCamelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels) __lowerCamelCase = self.vqvae.quantize.get_codebook_entry(UpperCamelCase_ , shape=UpperCamelCase_ ) __lowerCamelCase = self.vqvae.decode(UpperCamelCase_ , force_not_quantize=UpperCamelCase_ ).sample __lowerCamelCase = (image / 2 + 0.5).clamp(0 , 1 ) __lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": __lowerCamelCase = self.numpy_to_pil(UpperCamelCase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCamelCase_ ) def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: float ): __lowerCamelCase, __lowerCamelCase = torch.sort(UpperCamelCase_ , 1 , descending=UpperCamelCase_ ) __lowerCamelCase = torch.exp(UpperCamelCase_ ) __lowerCamelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate # Ensure that at least the largest probability is not zeroed out __lowerCamelCase = torch.full_like(keep_mask[:, 0:1, :] , UpperCamelCase_ ) __lowerCamelCase = torch.cat((all_true, keep_mask) , dim=1 ) __lowerCamelCase = keep_mask[:, :-1, :] __lowerCamelCase = keep_mask.gather(1 , indices.argsort(1 ) ) __lowerCamelCase = log_p_x_0.clone() __lowerCamelCase = -torch.inf # -inf = log(0) return rv
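# Minimal usage sketch for the pipeline class above. This mirrors the public
# VQDiffusionPipeline API in diffusers; the checkpoint name and prompt are illustrative.
from diffusers import VQDiffusionPipeline

vq_pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
generated = vq_pipeline("teddy bear playing in the pool", num_inference_steps=100)
generated.images[0].save("teddy_bear.png")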
import json import os from datetime import date from pathlib import Path from tabulate import DataRow, TableFormat, tabulate UpperCAmelCase_ = TableFormat( lineabove=None, linebelowheader=None, linebetweenrows=None, linebelow=None, headerrow=DataRow('', '|', '|'), datarow=DataRow('', '|', '|'), padding=1, with_header_hide=None, ) UpperCAmelCase_ = [] UpperCAmelCase_ = [] UpperCAmelCase_ = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}} UpperCAmelCase_ = [ { 'type': 'header', 'text': { 'type': 'plain_text', 'text': f"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""", 'emoji': True, }, } ] UpperCAmelCase_ = 0 for log in Path().glob('*.log'): UpperCAmelCase_ = 0 with open(log, 'r') as f: for line in f: UpperCAmelCase_ = json.loads(line) if line.get('nodeid', '') != "": UpperCAmelCase_ = line['nodeid'] if line.get('duration', None) is not None: UpperCAmelCase_ = f"""{line["duration"]:.4f}""" if line.get('outcome', '') == "failed": section_num_failed += 1 failed.append([test, duration, log.name.split('_')[0]]) total_num_failed += 1 group_info.append([str(log), section_num_failed, failed]) UpperCAmelCase_ = [] log.unlink() UpperCAmelCase_ = '' UpperCAmelCase_ = [] if total_num_failed > 0: for name, num_failed, failed_tests in group_info: if num_failed > 0: if num_failed == 1: message += f"*{name[1:]}: {num_failed} failed test*\n" else: message += f"*{name[1:]}: {num_failed} failed tests*\n" UpperCAmelCase_ = [] UpperCAmelCase_ = {} for test in failed_tests: UpperCAmelCase_ = test[0].split('::') UpperCAmelCase_ = data[0].split('/')[-1] if data[0] not in filesafailed: UpperCAmelCase_ = [data[1:]] else: filesafailed[data[0]] += [data[1:]] failed_table.append(data) UpperCAmelCase_ = [test[0] for test in failed_table] UpperCAmelCase_ = list(set(files)) # Count number of instances in failed_tests UpperCAmelCase_ = [] for file in individual_files: table.append([file, len(filesafailed[file])]) UpperCAmelCase_ = tabulate( table, headers=['Test Location', 'Num Failed'], tablefmt=hf_table_format, stralign='right', ) message += f"\n```\n{failed_table}\n```" all_filesafailed.append(filesafailed) if len(message) > 3_000: UpperCAmelCase_ = 'Too many failed tests, please see the full report in the Action results.' UpperCAmelCase_ = len(err) + 10 UpperCAmelCase_ = message[: 3_000 - offset] + f"""\n...\n```\n{err}""" print(f"""### {message}""") else: UpperCAmelCase_ = 'No failed tests! 🤗' print(f"""## {message}""") payload.append(no_error_payload) if os.environ.get('TEST_TYPE', '') != "": from slack_sdk import WebClient UpperCAmelCase_ = WebClient(token=os.environ['SLACK_API_TOKEN']) if message != "No failed tests! 
🤗": UpperCAmelCase_ = { 'type': 'section', 'text': { 'type': 'mrkdwn', 'text': message, }, } payload.append(md_report) UpperCAmelCase_ = { 'type': 'section', 'text': { 'type': 'mrkdwn', 'text': '*For more details:*', }, 'accessory': { 'type': 'button', 'text': { 'type': 'plain_text', 'text': 'Check Action results', 'emoji': True, }, 'url': f"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""", }, } payload.append(action_button) UpperCAmelCase_ = { 'type': 'context', 'elements': [ { 'type': 'plain_text', 'text': f"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""", } ], } payload.append(date_report) UpperCAmelCase_ = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload) UpperCAmelCase_ = response.data['ts'] for failed_file in all_filesafailed: for test_location, test_failures in failed_file.items(): # Keep only the first instance of the test name UpperCAmelCase_ = '' for i, row in enumerate(test_failures): if row[0] != test_class: UpperCAmelCase_ = row[0] else: UpperCAmelCase_ = '' UpperCAmelCase_ = { 'type': 'section', 'text': { 'type': 'mrkdwn', 'text': f"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""", }, } client.chat_postMessage( channel='#accelerate-ci-daily', thread_ts=ts, blocks=[payload], )
from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class lowerCamelCase__( __lowerCamelCase): UpperCAmelCase__ : List[Any] = ['image_processor', 'tokenizer'] UpperCAmelCase__ : int = 'BlipImageProcessor' UpperCAmelCase__ : Tuple = ('BertTokenizer', 'BertTokenizerFast') def __init__( self: Optional[Any] , UpperCamelCase_: List[str] , UpperCamelCase_: List[Any] ): __lowerCamelCase = False super().__init__(UpperCamelCase_ , UpperCamelCase_ ) __lowerCamelCase = self.image_processor def __call__( self: str , UpperCamelCase_: ImageInput = None , UpperCamelCase_: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCamelCase_: bool = True , UpperCamelCase_: Union[bool, str, PaddingStrategy] = False , UpperCamelCase_: Union[bool, str, TruncationStrategy] = None , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: int = 0 , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[bool] = None , UpperCamelCase_: bool = False , UpperCamelCase_: bool = False , UpperCamelCase_: bool = False , UpperCamelCase_: bool = False , UpperCamelCase_: bool = False , UpperCamelCase_: bool = True , UpperCamelCase_: Optional[Union[str, TensorType]] = None , **UpperCamelCase_: List[str] , ): if images is None and text is None: raise ValueError("""You have to specify either images or text.""" ) # Get only text if images is None: __lowerCamelCase = self.tokenizer __lowerCamelCase = self.tokenizer( text=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ , stride=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , return_overflowing_tokens=UpperCamelCase_ , return_special_tokens_mask=UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ , return_length=UpperCamelCase_ , verbose=UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ , ) return text_encoding # add pixel_values __lowerCamelCase = self.image_processor(UpperCamelCase_ , return_tensors=UpperCamelCase_ ) if text is not None: __lowerCamelCase = self.tokenizer( text=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ , stride=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , return_overflowing_tokens=UpperCamelCase_ , return_special_tokens_mask=UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ , return_length=UpperCamelCase_ , verbose=UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ , ) else: __lowerCamelCase = None if text_encoding is not None: encoding_image_processor.update(UpperCamelCase_ ) return encoding_image_processor def lowerCAmelCase__ ( self: Optional[int] , *UpperCamelCase_: Union[str, Any] , **UpperCamelCase_: Tuple ): return self.tokenizer.batch_decode(*UpperCamelCase_ , **UpperCamelCase_ ) def lowerCAmelCase__ ( self: List[str] , *UpperCamelCase_: List[str] , **UpperCamelCase_: str ): return self.tokenizer.decode(*UpperCamelCase_ , **UpperCamelCase_ ) @property def lowerCAmelCase__ ( self: str ): __lowerCamelCase = self.tokenizer.model_input_names __lowerCamelCase = 
self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
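# Minimal usage sketch for the processor above. The class is published as BlipProcessor
# in transformers; the checkpoint and image URL below are illustrative.
import requests
from PIL import Image
from transformers import BlipProcessor

blip_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
raw_image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
blip_inputs = blip_processor(images=raw_image, text="a photography of", return_tensors="pt")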
import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class lowerCamelCase__( unittest.TestCase): def __init__( self: Tuple , UpperCamelCase_: Optional[Any] , UpperCamelCase_: List[Any]=7 , UpperCamelCase_: int=3 , UpperCamelCase_: str=18 , UpperCamelCase_: List[Any]=30 , UpperCamelCase_: Tuple=4_00 , UpperCamelCase_: Dict=True , UpperCamelCase_: List[str]=None , UpperCamelCase_: Dict=True , ): __lowerCamelCase = size if size is not None else {"""height""": 18, """width""": 18} __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = num_channels __lowerCamelCase = image_size __lowerCamelCase = min_resolution __lowerCamelCase = max_resolution __lowerCamelCase = do_resize __lowerCamelCase = size __lowerCamelCase = apply_ocr def lowerCAmelCase__ ( self: int ): return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class lowerCamelCase__( __lowerCamelCase , unittest.TestCase): UpperCAmelCase__ : Dict = LayoutLMvaImageProcessor if is_pytesseract_available() else None def lowerCAmelCase__ ( self: Union[str, Any] ): __lowerCamelCase = LayoutLMvaImageProcessingTester(self ) @property def lowerCAmelCase__ ( self: Any ): return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase__ ( self: Any ): __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCamelCase_ , """do_resize""" ) ) self.assertTrue(hasattr(UpperCamelCase_ , """size""" ) ) self.assertTrue(hasattr(UpperCamelCase_ , """apply_ocr""" ) ) def lowerCAmelCase__ ( self: Union[str, Any] ): __lowerCamelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} ) __lowerCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} ) def lowerCAmelCase__ ( self: Union[str, Any] ): pass def lowerCAmelCase__ ( self: Optional[int] ): # Initialize image_processing __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , Image.Image ) # Test not batched input __lowerCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) self.assertIsInstance(encoding.words , UpperCamelCase_ ) self.assertIsInstance(encoding.boxes , UpperCamelCase_ ) # Test batched __lowerCamelCase = image_processing(UpperCamelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def lowerCAmelCase__ ( self: Tuple ): # Initialize 
image_processing __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , np.ndarray ) # Test not batched input __lowerCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched __lowerCamelCase = image_processing(UpperCamelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def lowerCAmelCase__ ( self: List[str] ): # Initialize image_processing __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , torch.Tensor ) # Test not batched input __lowerCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched __lowerCamelCase = image_processing(UpperCamelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def lowerCAmelCase__ ( self: Union[str, Any] ): # with apply_OCR = True __lowerCamelCase = LayoutLMvaImageProcessor() from datasets import load_dataset __lowerCamelCase = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" ) __lowerCamelCase = Image.open(ds[0]["""file"""] ).convert("""RGB""" ) __lowerCamelCase = image_processing(UpperCamelCase_ , return_tensors="""pt""" ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 __lowerCamelCase = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", 
"""with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231 __lowerCamelCase = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 
4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , UpperCamelCase_ ) self.assertListEqual(encoding.boxes , UpperCamelCase_ ) # with apply_OCR = False __lowerCamelCase = LayoutLMvaImageProcessor(apply_ocr=UpperCamelCase_ ) __lowerCamelCase = image_processing(UpperCamelCase_ , return_tensors="""pt""" ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow

from ..bert.test_tokenization_bert import BertTokenizationTest


@require_tokenizers
class lowerCamelCase__( __lowerCamelCase):
    UpperCAmelCase__ : Dict = DistilBertTokenizer
    UpperCAmelCase__ : Dict = DistilBertTokenizerFast
    UpperCAmelCase__ : Tuple = True

    @slow
    def lowerCAmelCase__ ( self: Tuple ):
        __lowerCamelCase = DistilBertTokenizer.from_pretrained("""distilbert-base-uncased""" )

        __lowerCamelCase = tokenizer.encode("""sequence builders""" , add_special_tokens=UpperCamelCase_ )
        __lowerCamelCase = tokenizer.encode("""multi-sequence build""" , add_special_tokens=UpperCamelCase_ )

        __lowerCamelCase = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ )
        __lowerCamelCase = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ , UpperCamelCase_ )

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]
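# For reference: with the distilbert-base-uncased vocabulary the assertions above reduce
# to the standard BERT layout, i.e. a single sequence is encoded as [CLS] tokens [SEP]
# and a pair as [CLS] tokens_a [SEP] tokens_b [SEP].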
import warnings

from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor


UpperCAmelCase_ = logging.get_logger(__name__)


class lowerCamelCase__( __lowerCamelCase):
    def __init__( self: Tuple , *UpperCamelCase_: Optional[int] , **UpperCamelCase_: Optional[int] ):
        warnings.warn(
            """The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use SegformerImageProcessor instead.""" , UpperCamelCase_ , )
        super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
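# Migration sketch: the deprecated class above only forwards to SegformerImageProcessor,
# so callers can instantiate the image processor directly. The checkpoint name is illustrative.
from transformers import SegformerImageProcessor

segformer_image_processor = SegformerImageProcessor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")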
import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler UpperCAmelCase_ = 16 UpperCAmelCase_ = 32 def lowerCamelCase__ ( A__ : Accelerator , A__ : int = 16 , A__ : str = "bert-base-cased" ): '''simple docstring''' __lowerCamelCase = AutoTokenizer.from_pretrained(A__ ) __lowerCamelCase = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(A__ : int ): # max_length=None => use the model max length (it's actually the default) __lowerCamelCase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=A__ , max_length=A__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __lowerCamelCase = datasets.map( A__ , batched=A__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=A__ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __lowerCamelCase = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(A__ : Optional[int] ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(A__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" ) return tokenizer.pad(A__ , padding="""longest""" , return_tensors="""pt""" ) # Instantiate dataloaders. __lowerCamelCase = DataLoader( tokenized_datasets["""train"""] , shuffle=A__ , collate_fn=A__ , batch_size=A__ ) __lowerCamelCase = DataLoader( tokenized_datasets["""validation"""] , shuffle=A__ , collate_fn=A__ , batch_size=A__ ) return train_dataloader, eval_dataloader def lowerCamelCase__ ( A__ : Tuple , A__ : Union[str, Any] , A__ : Tuple , A__ : Optional[Any] ): '''simple docstring''' model.eval() __lowerCamelCase = 0 for step, batch in enumerate(A__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): __lowerCamelCase = model(**A__ ) __lowerCamelCase = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times __lowerCamelCase, __lowerCamelCase = accelerator.gather( (predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(A__ ) - 1: __lowerCamelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen] __lowerCamelCase = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=A__ , references=A__ , ) __lowerCamelCase = metric.compute() return eval_metric["accuracy"] def lowerCamelCase__ ( A__ : Optional[Any] , A__ : Optional[int] ): '''simple docstring''' __lowerCamelCase = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __lowerCamelCase = config["""lr"""] __lowerCamelCase = int(config["""num_epochs"""] ) __lowerCamelCase = int(config["""seed"""] ) __lowerCamelCase = int(config["""batch_size"""] ) __lowerCamelCase = args.model_name_or_path set_seed(A__ ) __lowerCamelCase, __lowerCamelCase = get_dataloaders(A__ , A__ , A__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __lowerCamelCase = AutoModelForSequenceClassification.from_pretrained(A__ , return_dict=A__ ) # Instantiate optimizer __lowerCamelCase = ( AdamW if accelerator.state.deepspeed_plugin is None or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) __lowerCamelCase = optimizer_cls(params=model.parameters() , lr=A__ ) if accelerator.state.deepspeed_plugin is not None: __lowerCamelCase = accelerator.state.deepspeed_plugin.deepspeed_config[ """gradient_accumulation_steps""" ] else: __lowerCamelCase = 1 __lowerCamelCase = (len(A__ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): __lowerCamelCase = get_linear_schedule_with_warmup( optimizer=A__ , num_warmup_steps=0 , num_training_steps=A__ , ) else: __lowerCamelCase = DummyScheduler(A__ , total_num_steps=A__ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = accelerator.prepare( A__ , A__ , A__ , A__ , A__ ) # We need to keep track of how many total steps we have iterated over __lowerCamelCase = 0 # We also need to keep track of the stating epoch so files are named properly __lowerCamelCase = 0 __lowerCamelCase = evaluate.load("""glue""" , """mrpc""" ) __lowerCamelCase = num_epochs if args.partial_train_epoch is not None: __lowerCamelCase = args.partial_train_epoch if args.resume_from_checkpoint: accelerator.load_state(args.resume_from_checkpoint ) __lowerCamelCase = args.resume_from_checkpoint.split("""epoch_""" )[1] __lowerCamelCase = """""" for char in epoch_string: if char.isdigit(): state_epoch_num += char else: break __lowerCamelCase = int(A__ ) + 1 __lowerCamelCase = evaluation_loop(A__ , A__ , A__ , A__ ) accelerator.print("""resumed checkpoint performance:""" , A__ ) accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] ) accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] ) with open(os.path.join(args.output_dir , f'state_{starting_epoch-1}.json' ) , """r""" ) as f: __lowerCamelCase = json.load(A__ ) assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed" assert ( resumed_state["lr"] == lr_scheduler.get_lr()[0] ), "Scheduler learning rate mismatch, loading from checkpoint failed" assert ( resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"] ), "Optimizer learning rate mismatch, loading from checkpoint failed" assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed" return # Now we train the model __lowerCamelCase = {} for epoch in range(A__ , A__ ): model.train() for step, batch in enumerate(A__ ): __lowerCamelCase = model(**A__ ) __lowerCamelCase = outputs.loss __lowerCamelCase = loss / gradient_accumulation_steps accelerator.backward(A__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 __lowerCamelCase = f'epoch_{epoch}' __lowerCamelCase = os.path.join(args.output_dir , A__ ) accelerator.save_state(A__ ) __lowerCamelCase = evaluation_loop(A__ , A__ , A__ , A__ ) __lowerCamelCase = accuracy __lowerCamelCase = lr_scheduler.get_lr()[0] __lowerCamelCase = optimizer.param_groups[0]["""lr"""] __lowerCamelCase = epoch __lowerCamelCase = overall_step accelerator.print(f'epoch {epoch}:' , A__ ) accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , f'state_{epoch}.json' ) , """w""" ) as f: json.dump(A__ , A__ ) def lowerCamelCase__ ( ): '''simple docstring''' __lowerCamelCase = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" ) parser.add_argument( """--model_name_or_path""" , type=A__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=A__ , ) parser.add_argument( """--output_dir""" , type=A__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. 
Default is the current working directory.""" , ) parser.add_argument( """--resume_from_checkpoint""" , type=A__ , default=A__ , help="""If the training should continue from a checkpoint folder.""" , ) parser.add_argument( """--partial_train_epoch""" , type=A__ , default=A__ , help="""If passed, the training will stop after this number of epochs.""" , ) parser.add_argument( """--num_epochs""" , type=A__ , default=2 , help="""Number of train epochs.""" , ) __lowerCamelCase = parser.parse_args() __lowerCamelCase = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16} training_function(A__ , A__ ) if __name__ == "__main__": main()
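# Illustrative launch of the checkpointing script above with DeepSpeed via Accelerate.
# The config file name is a placeholder; the script flags mirror its argparse arguments.
#
#   accelerate launch --config_file deepspeed_config.yaml checkpointing_example.py \
#       --model_name_or_path bert-base-cased \
#       --num_epochs 2 \
#       --output_dir ./checkpoints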
from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) UpperCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name UpperCAmelCase_ = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... 
).images\n\n >>> images[0].save("robot_cat.png")\n ```\n' def lowerCamelCase__ ( A__ : Optional[int] , A__ : Dict , A__ : Optional[int]=8 ): '''simple docstring''' __lowerCamelCase = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 __lowerCamelCase = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class lowerCamelCase__( __lowerCamelCase): def __init__( self: List[Any] , UpperCamelCase_: UNetaDConditionModel , UpperCamelCase_: DDPMScheduler , UpperCamelCase_: VQModel , ): super().__init__() self.register_modules( unet=UpperCamelCase_ , scheduler=UpperCamelCase_ , movq=UpperCamelCase_ , ) __lowerCamelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1) def lowerCAmelCase__ ( self: int , UpperCamelCase_: Any , UpperCamelCase_: Tuple , UpperCamelCase_: int , UpperCamelCase_: Dict , UpperCamelCase_: Dict , UpperCamelCase_: int ): if latents is None: __lowerCamelCase = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_ ) else: if latents.shape != shape: raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' ) __lowerCamelCase = latents.to(UpperCamelCase_ ) __lowerCamelCase = latents * scheduler.init_noise_sigma return latents def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: str=0 ): if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) __lowerCamelCase = torch.device(F'cuda:{gpu_id}' ) __lowerCamelCase = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(UpperCamelCase_ , UpperCamelCase_ ) def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Optional[int]=0 ): if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ): from accelerate import cpu_offload_with_hook else: raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" ) __lowerCamelCase = torch.device(F'cuda:{gpu_id}' ) if self.device.type != "cpu": self.to("""cpu""" , silence_dtype_warnings=UpperCamelCase_ ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) __lowerCamelCase = None for cpu_offloaded_model in [self.unet, self.movq]: __lowerCamelCase, __lowerCamelCase = cpu_offload_with_hook(UpperCamelCase_ , UpperCamelCase_ , prev_module_hook=UpperCamelCase_ ) # We'll offload the last model manually. 
__lowerCamelCase = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def lowerCAmelCase__ ( self: int ): if not hasattr(self.unet , """_hf_hook""" ): return self.device for module in self.unet.modules(): if ( hasattr(UpperCamelCase_ , """_hf_hook""" ) and hasattr(module._hf_hook , """execution_device""" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(UpperCamelCase_ ) def __call__( self: Tuple , UpperCamelCase_: Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCamelCase_: Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: int = 5_12 , UpperCamelCase_: int = 5_12 , UpperCamelCase_: int = 1_00 , UpperCamelCase_: float = 4.0 , UpperCamelCase_: int = 1 , UpperCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_: Optional[torch.FloatTensor] = None , UpperCamelCase_: Optional[str] = "pil" , UpperCamelCase_: bool = True , ): __lowerCamelCase = self._execution_device __lowerCamelCase = guidance_scale > 1.0 if isinstance(UpperCamelCase_ , UpperCamelCase_ ): __lowerCamelCase = torch.cat(UpperCamelCase_ , dim=0 ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ): __lowerCamelCase = torch.cat(UpperCamelCase_ , dim=0 ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ): __lowerCamelCase = torch.cat(UpperCamelCase_ , dim=0 ) __lowerCamelCase = image_embeds.shape[0] * num_images_per_prompt if do_classifier_free_guidance: __lowerCamelCase = image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 ) __lowerCamelCase = negative_image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 ) __lowerCamelCase = hint.repeat_interleave(UpperCamelCase_ , dim=0 ) __lowerCamelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCamelCase_ ) __lowerCamelCase = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCamelCase_ ) self.scheduler.set_timesteps(UpperCamelCase_ , device=UpperCamelCase_ ) __lowerCamelCase = self.scheduler.timesteps __lowerCamelCase = self.movq.config.latent_channels __lowerCamelCase, __lowerCamelCase = downscale_height_and_width(UpperCamelCase_ , UpperCamelCase_ , self.movq_scale_factor ) # create initial latent __lowerCamelCase = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.scheduler , ) for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ): # expand the latents if we are doing classifier free guidance __lowerCamelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents __lowerCamelCase = {"""image_embeds""": image_embeds, """hint""": hint} __lowerCamelCase = self.unet( sample=UpperCamelCase_ , timestep=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , added_cond_kwargs=UpperCamelCase_ , return_dict=UpperCamelCase_ , )[0] if do_classifier_free_guidance: __lowerCamelCase, __lowerCamelCase = noise_pred.split(latents.shape[1] , dim=1 ) __lowerCamelCase, __lowerCamelCase = noise_pred.chunk(2 ) __lowerCamelCase, __lowerCamelCase = variance_pred.chunk(2 ) __lowerCamelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) __lowerCamelCase = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , """variance_type""" ) and 
self.scheduler.config.variance_type in ["learned", "learned_range"] ): __lowerCamelCase, __lowerCamelCase = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 __lowerCamelCase = self.scheduler.step( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ , )[0] # post-processing __lowerCamelCase = self.movq.decode(UpperCamelCase_ , force_not_quantize=UpperCamelCase_ )["""sample"""] if output_type not in ["pt", "np", "pil"]: raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' ) if output_type in ["np", "pil"]: __lowerCamelCase = image * 0.5 + 0.5 __lowerCamelCase = image.clamp(0 , 1 ) __lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": __lowerCamelCase = self.numpy_to_pil(UpperCamelCase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCamelCase_ )
12
import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, WavaVecaConfig, WavaVecaFeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 UpperCAmelCase_ = get_tests_dir('fixtures') UpperCAmelCase_ = get_tests_dir('fixtures/dummy_feature_extractor_config.json') UpperCAmelCase_ = get_tests_dir('fixtures/dummy-config.json') class lowerCamelCase__( unittest.TestCase): def lowerCAmelCase__ ( self: List[str] ): __lowerCamelCase = 0 def lowerCAmelCase__ ( self: List[str] ): __lowerCamelCase = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) def lowerCAmelCase__ ( self: Union[str, Any] ): __lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) def lowerCAmelCase__ ( self: int ): with tempfile.TemporaryDirectory() as tmpdirname: __lowerCamelCase = WavaVecaConfig() # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally __lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ ).to_dict() config_dict.pop("""feature_extractor_type""" ) __lowerCamelCase = WavaVecaFeatureExtractor(**UpperCamelCase_ ) # save in new folder model_config.save_pretrained(UpperCamelCase_ ) config.save_pretrained(UpperCamelCase_ ) __lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ ) # make sure private variable is not incorrectly saved __lowerCamelCase = json.loads(config.to_json_string() ) self.assertTrue("""_processor_class""" not in dict_as_saved ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) def lowerCAmelCase__ ( self: Tuple ): __lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) def lowerCAmelCase__ ( self: int ): with self.assertRaisesRegex( UpperCamelCase_ , """bert-base is not a local folder and is not a valid model identifier""" ): __lowerCamelCase = AutoFeatureExtractor.from_pretrained("""bert-base""" ) def lowerCAmelCase__ ( self: Tuple ): with self.assertRaisesRegex( UpperCamelCase_ , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): __lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ , revision="""aaaaaa""" ) def lowerCAmelCase__ ( self: Optional[Any] ): with self.assertRaisesRegex( UpperCamelCase_ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ): __lowerCamelCase = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" ) def lowerCAmelCase__ ( self: Tuple ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(UpperCamelCase_ ): __lowerCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" ) # If remote code is disabled, we can't load this config. 
with self.assertRaises(UpperCamelCase_ ): __lowerCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=UpperCamelCase_ ) __lowerCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=UpperCamelCase_ ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) # Test feature extractor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(UpperCamelCase_ ) __lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ , trust_remote_code=UpperCamelCase_ ) self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) def lowerCAmelCase__ ( self: Any ): try: AutoConfig.register("""custom""" , UpperCamelCase_ ) AutoFeatureExtractor.register(UpperCamelCase_ , UpperCamelCase_ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(UpperCamelCase_ ): AutoFeatureExtractor.register(UpperCamelCase_ , UpperCamelCase_ ) # Now that the config is registered, it can be used as any other config with the auto-API __lowerCamelCase = CustomFeatureExtractor.from_pretrained(UpperCamelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(UpperCamelCase_ ) __lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def lowerCAmelCase__ ( self: Dict ): class lowerCamelCase__( __lowerCamelCase): UpperCAmelCase__ : str = True try: AutoConfig.register("""custom""" , UpperCamelCase_ ) AutoFeatureExtractor.register(UpperCamelCase_ , UpperCamelCase_ ) # If remote code is not set, the default is to use local __lowerCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(feature_extractor.is_local ) # If remote code is disabled, we load the local one. __lowerCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=UpperCamelCase_ ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(feature_extractor.is_local ) # If remote is enabled, we load from the Hub __lowerCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=UpperCamelCase_ ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(not hasattr(UpperCamelCase_ , """is_local""" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
12
1
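The Kandinsky ControlNet pipeline above sizes its latents with downscale_height_and_width, which rounds the requested pixel size up to a whole number of scale_factor**2 blocks and returns it in latent units. A readable re-statement of that rounding, offered only as a sketch with the same semantics:

from __future__ import annotations


def downscale_height_and_width(height: int, width: int, scale_factor: int = 8) -> tuple[int, int]:
    # Round each dimension up to a whole number of scale_factor**2 pixel blocks,
    # then express the result in latent cells (one cell per scale_factor pixels).
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


assert downscale_height_and_width(768, 768) == (96, 96)
assert downscale_height_and_width(765, 770) == (96, 104)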
from __future__ import annotations def lowerCamelCase__ ( A__ : list[int | float] , A__ : int , A__ : int ): '''simple docstring''' if len(A__ ) == 0: raise ValueError("""find_max() arg is an empty sequence""" ) if ( left >= len(A__ ) or left < -len(A__ ) or right >= len(A__ ) or right < -len(A__ ) ): raise IndexError("""list index out of range""" ) if left == right: return nums[left] __lowerCamelCase = (left + right) >> 1 # the middle __lowerCamelCase = find_max(A__ , A__ , A__ ) # find max in range[left, mid] __lowerCamelCase = find_max(A__ , mid + 1 , A__ ) # find max in range[mid + 1, right] return left_max if left_max >= right_max else right_max if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
12
# Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version UpperCAmelCase_ = get_logger(__name__) class lowerCamelCase__: UpperCAmelCase__ : List[Any] = 'dummy_data' UpperCAmelCase__ : str = 'datasets' UpperCAmelCase__ : Tuple = False def __init__( self: Optional[Any] , UpperCamelCase_: str , UpperCamelCase_: str , UpperCamelCase_: Union[Version, str] , UpperCamelCase_: Optional[str] = None , UpperCamelCase_: bool = False , UpperCamelCase_: bool = True , UpperCamelCase_: Optional[List[Callable]] = None , ): __lowerCamelCase = 0 __lowerCamelCase = dataset_name __lowerCamelCase = cache_dir __lowerCamelCase = use_local_dummy_data __lowerCamelCase = config # download_callbacks take a single url as input __lowerCamelCase = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root __lowerCamelCase = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general __lowerCamelCase = str(UpperCamelCase_ ) # to be downloaded __lowerCamelCase = None __lowerCamelCase = None @property def lowerCAmelCase__ ( self: List[Any] ): if self._dummy_file is None: __lowerCamelCase = self.download_dummy_data() return self._dummy_file @property def lowerCAmelCase__ ( self: str ): if self.config is not None: # structure is dummy / config_name / version_name return os.path.join("""dummy""" , self.config.name , self.version_name ) # structure is dummy / version_name return os.path.join("""dummy""" , self.version_name ) @property def lowerCAmelCase__ ( self: Optional[Any] ): return os.path.join(self.dummy_data_folder , """dummy_data.zip""" ) def lowerCAmelCase__ ( self: Any ): __lowerCamelCase = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) __lowerCamelCase = cached_path( UpperCamelCase_ , cache_dir=self.cache_dir , extract_compressed_file=UpperCamelCase_ , force_extract=UpperCamelCase_ ) return os.path.join(UpperCamelCase_ , self.dummy_file_name ) @property def lowerCAmelCase__ ( self: Optional[Any] ): return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file ) @property def lowerCAmelCase__ ( self: Tuple ): if self._bucket_url is None: __lowerCamelCase = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , """/""" ) ) return self._bucket_url @property def lowerCAmelCase__ ( self: str ): # return full path if its a dir if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep , """/""" ).split("""/""" )[:-1] ) def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Dict , *UpperCamelCase_: str ): if self.load_existing_dummy_data: # dummy data is downloaded and tested __lowerCamelCase = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned __lowerCamelCase = self.dummy_file_name # special case when data_url is a dict if isinstance(UpperCamelCase_ , UpperCamelCase_ ): return self.create_dummy_data_dict(UpperCamelCase_ , UpperCamelCase_ ) elif isinstance(UpperCamelCase_ , (list, tuple) ): return self.create_dummy_data_list(UpperCamelCase_ , UpperCamelCase_ ) else: return self.create_dummy_data_single(UpperCamelCase_ , UpperCamelCase_ ) def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Optional[Any] , *UpperCamelCase_: str ): return self.download_and_extract(UpperCamelCase_ ) def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Tuple , UpperCamelCase_: str ): return self.download_and_extract(UpperCamelCase_ ) def lowerCAmelCase__ ( self: str , UpperCamelCase_: int , *UpperCamelCase_: List[str] , **UpperCamelCase_: str ): return path def lowerCAmelCase__ ( self: Dict ): return {} def lowerCAmelCase__ ( self: str , UpperCamelCase_: List[Any] , UpperCamelCase_: Union[str, Any] ): __lowerCamelCase = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(UpperCamelCase_ , UpperCamelCase_ ): for single_url in single_urls: download_callback(UpperCamelCase_ ) else: __lowerCamelCase = single_urls download_callback(UpperCamelCase_ ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(UpperCamelCase_ , UpperCamelCase_ ): __lowerCamelCase = [os.path.join(UpperCamelCase_ , urllib.parse.quote_plus(Path(UpperCamelCase_ ).name ) ) for x in single_urls] else: __lowerCamelCase = single_urls __lowerCamelCase = os.path.join(UpperCamelCase_ , urllib.parse.quote_plus(Path(UpperCamelCase_ ).name ) ) __lowerCamelCase = value # make sure that values are unique if all(isinstance(UpperCamelCase_ , UpperCamelCase_ ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique __lowerCamelCase = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[int] ): __lowerCamelCase = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one __lowerCamelCase = all(bool(re.findall("""[0-9]{3,}-of-[0-9]{3,}""" , UpperCamelCase_ ) ) for url in data_url ) __lowerCamelCase = all( url.startswith("""https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed""" ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): __lowerCamelCase = [data_url[0]] * len(UpperCamelCase_ ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(UpperCamelCase_ ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus __lowerCamelCase = os.path.join(UpperCamelCase_ , urllib.parse.quote_plus(single_url.split("""/""" )[-1] ) ) dummy_data_list.append(UpperCamelCase_ ) return dummy_data_list def lowerCAmelCase__ ( self: Tuple , 
UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[Any] ): for download_callback in self.download_callbacks: download_callback(UpperCamelCase_ ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus __lowerCamelCase = os.path.join(UpperCamelCase_ , urllib.parse.quote_plus(data_url.split("""/""" )[-1] ) ) if os.path.exists(UpperCamelCase_ ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def lowerCAmelCase__ ( self: Optional[Any] ): pass def lowerCAmelCase__ ( self: List[Any] ): pass def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Dict ): def _iter_archive_members(UpperCamelCase_: Any ): # this preserves the order of the members inside the ZIP archive __lowerCamelCase = Path(self.dummy_file ).parent __lowerCamelCase = path.relative_to(UpperCamelCase_ ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: __lowerCamelCase = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(UpperCamelCase_ ) __lowerCamelCase = Path(UpperCamelCase_ ) __lowerCamelCase = _iter_archive_members(UpperCamelCase_ ) if self.use_local_dummy_data else path.rglob("""*""" ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith((""".""", """__""") ): yield file_path.relative_to(UpperCamelCase_ ).as_posix(), file_path.open("""rb""" ) def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Dict ): if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): __lowerCamelCase = [paths] for path in paths: if os.path.isfile(UpperCamelCase_ ): if os.path.basename(UpperCamelCase_ ).startswith((""".""", """__""") ): return yield path else: for dirpath, dirnames, filenames in os.walk(UpperCamelCase_ ): if os.path.basename(UpperCamelCase_ ).startswith((""".""", """__""") ): continue dirnames.sort() for filename in sorted(UpperCamelCase_ ): if filename.startswith((""".""", """__""") ): continue yield os.path.join(UpperCamelCase_ , UpperCamelCase_ )
12
1
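The find_max row above is a divide-and-conquer maximum over the inclusive slice nums[left..right]. A short usage sketch with the same signature, with readable names substituted for the obfuscated ones:

from __future__ import annotations


def find_max(nums: list[float], left: int, right: int) -> float:
    # Divide-and-conquer maximum of nums[left..right], bounds inclusive.
    if left == right:
        return nums[left]
    mid = (left + right) >> 1
    left_max = find_max(nums, left, mid)
    right_max = find_max(nums, mid + 1, right)
    return left_max if left_max >= right_max else right_max


nums = [3, -1, 7, 2, 7, 0]
assert find_max(nums, 0, len(nums) - 1) == 7
assert find_max(nums, 1, 3) == 7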
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCAmelCase_ = { 'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'], 'configuration_data2vec_text': [ 'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecTextConfig', 'Data2VecTextOnnxConfig', ], 'configuration_data2vec_vision': [ 'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecVisionConfig', 'Data2VecVisionOnnxConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ 'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST', 'Data2VecAudioForAudioFrameClassification', 'Data2VecAudioForCTC', 'Data2VecAudioForSequenceClassification', 'Data2VecAudioForXVector', 'Data2VecAudioModel', 'Data2VecAudioPreTrainedModel', ] UpperCAmelCase_ = [ 'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'Data2VecTextForCausalLM', 'Data2VecTextForMaskedLM', 'Data2VecTextForMultipleChoice', 'Data2VecTextForQuestionAnswering', 'Data2VecTextForSequenceClassification', 'Data2VecTextForTokenClassification', 'Data2VecTextModel', 'Data2VecTextPreTrainedModel', ] UpperCAmelCase_ = [ 'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST', 'Data2VecVisionForImageClassification', 'Data2VecVisionForMaskedImageModeling', 'Data2VecVisionForSemanticSegmentation', 'Data2VecVisionModel', 'Data2VecVisionPreTrainedModel', ] if is_tf_available(): UpperCAmelCase_ = [ 'TFData2VecVisionForImageClassification', 'TFData2VecVisionForSemanticSegmentation', 'TFData2VecVisionModel', 'TFData2VecVisionPreTrainedModel', ] if TYPE_CHECKING: from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig from .configuration_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecTextConfig, DataaVecTextOnnxConfig, ) from .configuration_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecVisionConfig, DataaVecVisionOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dataavec_audio import ( DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecAudioForAudioFrameClassification, DataaVecAudioForCTC, DataaVecAudioForSequenceClassification, DataaVecAudioForXVector, DataaVecAudioModel, DataaVecAudioPreTrainedModel, ) from .modeling_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecTextForCausalLM, DataaVecTextForMaskedLM, DataaVecTextForMultipleChoice, DataaVecTextForQuestionAnswering, DataaVecTextForSequenceClassification, DataaVecTextForTokenClassification, DataaVecTextModel, DataaVecTextPreTrainedModel, ) from .modeling_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecVisionForImageClassification, DataaVecVisionForMaskedImageModeling, DataaVecVisionForSemanticSegmentation, DataaVecVisionModel, DataaVecVisionPreTrainedModel, ) if is_tf_available(): from .modeling_tf_dataavec_vision import ( TFDataaVecVisionForImageClassification, TFDataaVecVisionForSemanticSegmentation, TFDataaVecVisionModel, TFDataaVecVisionPreTrainedModel, ) else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
12
from __future__ import annotations def lowerCamelCase__ ( A__ : list[int] , A__ : list[int] , A__ : list[int] , A__ : list[list[str]] , A__ : int , ): '''simple docstring''' __lowerCamelCase = len(A__ ) # If row is equal to the size of the board it means there are a queen in each row in # the current board (possible_board) if row == n: # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . '] boards.append([""". """ * i + """Q """ + """. """ * (n - 1 - i) for i in possible_board] ) return # We iterate each column in the row to find all possible results in each row for col in range(A__ ): # We apply that we learned previously. First we check that in the current board # (possible_board) there are not other same value because if there is it means # that there are a collision in vertical. Then we apply the two formulas we # learned before: # # 45º: y - x = b or 45: row - col = b # 135º: y + x = b or row + col = b. # # And we verify if the results of this two formulas not exist in their variables # respectively. (diagonal_right_collisions, diagonal_left_collisions) # # If any or these are True it means there is a collision so we continue to the # next value in the for loop. if ( col in possible_board or row - col in diagonal_right_collisions or row + col in diagonal_left_collisions ): continue # If it is False we call dfs function again and we update the inputs depth_first_search( [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , A__ , A__ , ) def lowerCamelCase__ ( A__ : int ): '''simple docstring''' __lowerCamelCase = [] depth_first_search([] , [] , [] , A__ , A__ ) # Print all the boards for board in boards: for column in board: print(A__ ) print("""""" ) print(len(A__ ) , """solutions were found.""" ) if __name__ == "__main__": import doctest doctest.testmod() n_queens_solution(4)
12
1
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool class lowerCamelCase__( __lowerCamelCase): UpperCAmelCase__ : Any = 'philschmid/bart-large-cnn-samsum' UpperCAmelCase__ : str = ( 'This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, ' 'and returns a summary of the text.' ) UpperCAmelCase__ : List[str] = 'summarizer' UpperCAmelCase__ : Tuple = AutoTokenizer UpperCAmelCase__ : Tuple = AutoModelForSeqaSeqLM UpperCAmelCase__ : Any = ['text'] UpperCAmelCase__ : List[str] = ['text'] def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Optional[int] ): return self.pre_processor(UpperCamelCase_ , return_tensors="""pt""" , truncation=UpperCamelCase_ ) def lowerCAmelCase__ ( self: int , UpperCamelCase_: Union[str, Any] ): return self.model.generate(**UpperCamelCase_ )[0] def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: List[Any] ): return self.pre_processor.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
12
from __future__ import annotations from collections import namedtuple from dataclasses import dataclass @dataclass class lowerCamelCase__: UpperCAmelCase__ : int UpperCAmelCase__ : TreeNode | None = None UpperCAmelCase__ : TreeNode | None = None UpperCAmelCase_ = namedtuple('CoinsDistribResult', 'moves excess') def lowerCamelCase__ ( A__ : TreeNode | None ): '''simple docstring''' if root is None: return 0 # Validation def count_nodes(A__ : TreeNode | None ) -> int: if node is None: return 0 return count_nodes(node.left ) + count_nodes(node.right ) + 1 def count_coins(A__ : TreeNode | None ) -> int: if node is None: return 0 return count_coins(node.left ) + count_coins(node.right ) + node.data if count_nodes(A__ ) != count_coins(A__ ): raise ValueError("""The nodes number should be same as the number of coins""" ) # Main calculation def get_distrib(A__ : TreeNode | None ) -> CoinsDistribResult: if node is None: return CoinsDistribResult(0 , 1 ) __lowerCamelCase, __lowerCamelCase = get_distrib(node.left ) __lowerCamelCase, __lowerCamelCase = get_distrib(node.right ) __lowerCamelCase = 1 - left_distrib_excess __lowerCamelCase = 1 - right_distrib_excess __lowerCamelCase = ( left_distrib_moves + right_distrib_moves + abs(A__ ) + abs(A__ ) ) __lowerCamelCase = node.data - coins_to_left - coins_to_right return CoinsDistribResult(A__ , A__ ) return get_distrib(A__ )[0] if __name__ == "__main__": import doctest doctest.testmod()
12
1
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast @require_vision class lowerCamelCase__( unittest.TestCase): def lowerCAmelCase__ ( self: List[Any] ): __lowerCamelCase = tempfile.mkdtemp() __lowerCamelCase = BlipImageProcessor() __lowerCamelCase = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" ) __lowerCamelCase = BlipaProcessor(UpperCamelCase_ , UpperCamelCase_ ) processor.save_pretrained(self.tmpdirname ) def lowerCAmelCase__ ( self: Dict , **UpperCamelCase_: Tuple ): return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase_ ).tokenizer def lowerCAmelCase__ ( self: List[Any] , **UpperCamelCase_: Optional[int] ): return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase_ ).image_processor def lowerCAmelCase__ ( self: Any ): shutil.rmtree(self.tmpdirname ) def lowerCAmelCase__ ( self: List[Any] ): __lowerCamelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowerCamelCase = [Image.fromarray(np.moveaxis(UpperCamelCase_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowerCAmelCase__ ( self: Any ): __lowerCamelCase = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowerCamelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) __lowerCamelCase = self.get_image_processor(do_normalize=UpperCamelCase_ , padding_value=1.0 ) __lowerCamelCase = BlipaProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase_ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCamelCase_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCamelCase_ ) def lowerCAmelCase__ ( self: List[str] ): __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = BlipaProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = image_processor(UpperCamelCase_ , return_tensors="""np""" ) __lowerCamelCase = processor(images=UpperCamelCase_ , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def lowerCAmelCase__ ( self: Dict ): __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = BlipaProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) __lowerCamelCase = """lower newer""" __lowerCamelCase = processor(text=UpperCamelCase_ ) __lowerCamelCase = tokenizer(UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowerCAmelCase__ ( self: Union[str, Any] ): __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = BlipaProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) __lowerCamelCase = """lower 
newer""" __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = processor(text=UpperCamelCase_ , images=UpperCamelCase_ ) self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] ) # test if it raises when no input is passed with pytest.raises(UpperCamelCase_ ): processor() def lowerCAmelCase__ ( self: Optional[Any] ): __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = BlipaProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) __lowerCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __lowerCamelCase = processor.batch_decode(UpperCamelCase_ ) __lowerCamelCase = tokenizer.batch_decode(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) def lowerCAmelCase__ ( self: List[str] ): __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = BlipaProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) __lowerCamelCase = """lower newer""" __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = processor(text=UpperCamelCase_ , images=UpperCamelCase_ ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
12
from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging UpperCAmelCase_ = logging.get_logger(__name__) class lowerCamelCase__( __lowerCamelCase): UpperCAmelCase__ : Tuple = ['pixel_values'] def __init__( self: Any , UpperCamelCase_: bool = True , UpperCamelCase_: Union[int, float] = 1 / 2_55 , UpperCamelCase_: bool = True , UpperCamelCase_: int = 8 , **UpperCamelCase_: Tuple , ): super().__init__(**UpperCamelCase_ ) __lowerCamelCase = do_rescale __lowerCamelCase = rescale_factor __lowerCamelCase = do_pad __lowerCamelCase = pad_size def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: np.ndarray , UpperCamelCase_: float , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_: Tuple ): return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: np.ndarray , UpperCamelCase_: int , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None ): __lowerCamelCase, __lowerCamelCase = get_image_size(UpperCamelCase_ ) __lowerCamelCase = (old_height // size + 1) * size - old_height __lowerCamelCase = (old_width // size + 1) * size - old_width return pad(UpperCamelCase_ , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=UpperCamelCase_ ) def lowerCAmelCase__ ( self: str , UpperCamelCase_: ImageInput , UpperCamelCase_: Optional[bool] = None , UpperCamelCase_: Optional[float] = None , UpperCamelCase_: Optional[bool] = None , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[Union[str, TensorType]] = None , UpperCamelCase_: Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase_: Any , ): __lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale __lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor __lowerCamelCase = do_pad if do_pad is not None else self.do_pad __lowerCamelCase = pad_size if pad_size is not None else self.pad_size __lowerCamelCase = make_list_of_images(UpperCamelCase_ ) if not valid_images(UpperCamelCase_ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) # All transformations expect numpy arrays. __lowerCamelCase = [to_numpy_array(UpperCamelCase_ ) for image in images] if do_rescale: __lowerCamelCase = [self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ ) for image in images] if do_pad: __lowerCamelCase = [self.pad(UpperCamelCase_ , size=UpperCamelCase_ ) for image in images] __lowerCamelCase = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images] __lowerCamelCase = {"""pixel_values""": images} return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
12
1
from __future__ import annotations from PIL import Image # Define glider example UpperCAmelCase_ = [ [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], ] # Define blinker example UpperCAmelCase_ = [[0, 1, 0], [0, 1, 0], [0, 1, 0]] def lowerCamelCase__ ( A__ : list[list[int]] ): '''simple docstring''' __lowerCamelCase = [] for i in range(len(A__ ) ): __lowerCamelCase = [] for j in range(len(cells[i] ) ): # Get the number of live neighbours __lowerCamelCase = 0 if i > 0 and j > 0: neighbour_count += cells[i - 1][j - 1] if i > 0: neighbour_count += cells[i - 1][j] if i > 0 and j < len(cells[i] ) - 1: neighbour_count += cells[i - 1][j + 1] if j > 0: neighbour_count += cells[i][j - 1] if j < len(cells[i] ) - 1: neighbour_count += cells[i][j + 1] if i < len(A__ ) - 1 and j > 0: neighbour_count += cells[i + 1][j - 1] if i < len(A__ ) - 1: neighbour_count += cells[i + 1][j] if i < len(A__ ) - 1 and j < len(cells[i] ) - 1: neighbour_count += cells[i + 1][j + 1] # Rules of the game of life (excerpt from Wikipedia): # 1. Any live cell with two or three live neighbours survives. # 2. Any dead cell with three live neighbours becomes a live cell. # 3. All other live cells die in the next generation. # Similarly, all other dead cells stay dead. __lowerCamelCase = cells[i][j] == 1 if ( (alive and 2 <= neighbour_count <= 3) or not alive and neighbour_count == 3 ): next_generation_row.append(1 ) else: next_generation_row.append(0 ) next_generation.append(A__ ) return next_generation def lowerCamelCase__ ( A__ : list[list[int]] , A__ : int ): '''simple docstring''' __lowerCamelCase = [] for _ in range(A__ ): # Create output image __lowerCamelCase = Image.new("""RGB""" , (len(cells[0] ), len(A__ )) ) __lowerCamelCase = img.load() # Save cells to image for x in range(len(A__ ) ): for y in range(len(cells[0] ) ): __lowerCamelCase = 255 - cells[y][x] * 255 __lowerCamelCase = (colour, colour, colour) # Save image images.append(A__ ) __lowerCamelCase = new_generation(A__ ) return images if __name__ == "__main__": UpperCAmelCase_ = generate_images(GLIDER, 16) images[0].save('out.gif', save_all=True, append_images=images[1:])
12
from __future__ import annotations def lowerCamelCase__ ( A__ : list[int | float] , A__ : int , A__ : int ): '''simple docstring''' if len(A__ ) == 0: raise ValueError("""find_max() arg is an empty sequence""" ) if ( left >= len(A__ ) or left < -len(A__ ) or right >= len(A__ ) or right < -len(A__ ) ): raise IndexError("""list index out of range""" ) if left == right: return nums[left] __lowerCamelCase = (left + right) >> 1 # the middle __lowerCamelCase = find_max(A__ , A__ , A__ ) # find max in range[left, mid] __lowerCamelCase = find_max(A__ , mid + 1 , A__ ) # find max in range[mid + 1, right] return left_max if left_max >= right_max else right_max if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
12
1
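The Game of Life row above applies Conway's rules in new_generation; the three-cell blinker is a handy check, since it flips between a vertical and a horizontal bar every step. A condensed sketch of the same rules (bounded grid, cells off the board count as dead):

from __future__ import annotations


def new_generation(cells: list[list[int]]) -> list[list[int]]:
    # 1 = alive, 0 = dead; a live cell survives with 2 or 3 live neighbours,
    # a dead cell becomes alive with exactly 3 live neighbours.
    rows, cols = len(cells), len(cells[0])
    result = []
    for i in range(rows):
        row = []
        for j in range(cols):
            neighbours = sum(
                cells[x][y]
                for x in range(max(0, i - 1), min(rows, i + 2))
                for y in range(max(0, j - 1), min(cols, j + 2))
                if (x, y) != (i, j)
            )
            alive = cells[i][j] == 1
            row.append(1 if (alive and 2 <= neighbours <= 3) or (not alive and neighbours == 3) else 0)
        result.append(row)
    return result


blinker = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
assert new_generation(blinker) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
assert new_generation(new_generation(blinker)) == blinker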