Dataset schema (column, dtype, value range):

    code                     string   lengths 86 to 54.5k
    code_codestyle           int64    0 to 371
    style_context            string   lengths 87 to 49.2k
    style_context_codestyle  int64    0 to 349
    label                    int64    0 to 1

Each row below appears in column order: the code sample, its code_codestyle
id, the style_context sample, its style_context_codestyle id, and the label.
'''simple docstring'''

import unittest

from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
@require_sentencepiece
@slow  # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]
        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[0, 4_9_0, 1_4_3_2_8, 4_5_0_7, 3_5_4, 4_7, 4_3_6_6_9, 9_5, 2_5, 7_8_1_1_7, 2_0_2_1_5, 1_9_7_7_9, 1_9_0, 2_2, 4_0_0, 4, 3_5_3_4_3, 8_0_3_1_0, 6_0_3, 8_6, 2_4_9_3_7, 1_0_5, 3_3_4_3_8, 9_4_7_6_2, 1_9_6, 3_9_6_4_2, 7, 1_5, 1_5_9_3_3, 1_7_3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0_5_3_4, 8_7, 2_5, 6_6, 3_3_5_8, 1_9_6, 5_5_2_8_9, 8, 8_2_9_6_1, 8_1, 2_2_0_4, 7_5_2_0_3, 7, 1_5, 7_6_3, 1_2_9_5_6, 2_1_6, 1_7_8, 1_4_3_2_8, 9_5_9_5, 1_3_7_7, 6_9_6_9_3, 7, 4_4_8, 7_1_0_2_1, 1_9_6, 1_8_1_0_6, 1_4_3_7, 1_3_9_7_4, 1_0_8, 9_0_8_3, 4, 4_9_3_1_5, 7, 3_9, 8_6, 1_3_2_6, 2_7_9_3, 4_6_3_3_3, 4, 4_4_8, 1_9_6, 7_4_5_8_8, 7, 4_9_3_1_5, 7, 3_9, 2_1, 8_2_2, 3_8_4_7_0, 7_4, 2_1, 6_6_7_2_3, 6_2_4_8_0, 8, 2_2_0_5_0, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
code_codestyle: 17
'''simple docstring'''

import os

import pytest
import yaml

from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict


@pytest.mark.parametrize(
    "files",
    [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ],
)
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n  dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42


@pytest.mark.parametrize(
    "dataset_info",
    [
        DatasetInfo(),
        DatasetInfo(
            description="foo",
            features=Features({"a": Value("int32")}),
            builder_name="builder",
            config_name="config",
            version="1.0.0",
            splits=[{"name": "train"}],
            download_size=42,
        ),
    ],
)
def test_dataset_info_dump_and_reload(tmp_path, dataset_info):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))


def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded


def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}


@pytest.mark.parametrize(
    "dataset_infos_dict",
    [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo",
                    features=Features({"a": Value("int32")}),
                    builder_name="builder",
                    config_name="config",
                    version="1.0.0",
                    splits=[{"name": "train"}],
                    download_size=42,
                )
            }
        ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=1337),
            }
        ),
    ],
)
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)

    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
style_context_codestyle: 17
label: 1
'''simple docstring'''

def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False

    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    # initialize path with -1, meaning that the vertex has not been visited yet
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
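A quick usage sketch for the Hamiltonian-cycle sample above. The graph is an illustration of mine, not part of the dataset row, and it assumes the function names as restored here.

# Illustrative usage: 0-1-2-4-3-0 is a Hamiltonian cycle in this 5-vertex graph.
graph = [
    [0, 1, 0, 1, 0],
    [1, 0, 1, 1, 1],
    [0, 1, 0, 0, 1],
    [1, 1, 0, 0, 1],
    [0, 1, 1, 1, 0],
]
print(hamilton_cycle(graph))  # [0, 1, 2, 4, 3, 0]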
code_codestyle: 17
'''simple docstring'''

from math import factorial


def solution(num: int = 100) -> int:
    return sum(map(int, str(factorial(num))))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
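Two sanity checks of mine for the digit-sum-of-factorial sample above: 10! = 3628800, whose digits sum to 27, and 648 is the published Project Euler 20 answer for 100!.

assert solution(10) == 27  # 10! = 3628800 -> 3+6+2+8+8+0+0 == 27
assert solution(100) == 648  # Project Euler problem 20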
style_context_codestyle: 17
label: 1
'''simple docstring'''

from __future__ import annotations


def mean(nums: list) -> float:
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
code_codestyle: 17
'''simple docstring'''

from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # inserting at the head in descending order yields an ascending list
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
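A small usage sketch for the linked-list sample above; the expected string is my own hand computation from the two test tuples, not part of the dataset row.

merged = merge_lists(SortedLinkedList(test_data_odd), SortedLinkedList(test_data_even))
assert len(merged) == 16  # all values from both lists survive the merge
assert str(merged) == "-11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10"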
style_context_codestyle: 17
label: 1
'''simple docstring'''

from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_valid_image,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


def make_batched(videos):
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")


class VideoImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
code_codestyle: 17
'''simple docstring'''

def bin_to_octal(bin_string: str) -> str:
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    # left-pad with zeros until the length is a multiple of 3
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string


if __name__ == "__main__":
    from doctest import testmod

    testmod()
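A couple of illustrative calls of mine for the converter above, with the values checked by hand (0b1111 = 15 = 0o17, 0b101010101 = 341 = 0o525).

assert bin_to_octal("1111") == "17"
assert bin_to_octal("101010101") == "525"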
style_context_codestyle: 17
label: 1
'''simple docstring'''

from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2 = ["A", "B", "C"]
generate_all_permutations(sequence_2)
code_codestyle: 17
'''simple docstring'''

from datetime import datetime

import matplotlib.pyplot as plt
import torch


def freeze_module(module):
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image):
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
style_context_codestyle: 17
label: 1
'''simple docstring'''

import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict

import requests
from slack_sdk import WebClient

client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])


def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent


def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures


class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results

    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(":")
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds
        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"

    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def no_failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"

        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }

    @property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)

    @staticmethod
    def error_out():
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(payload)}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )

    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )

    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)


def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}


def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact


def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts: Dict[str, Artifact] = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts


if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
code_codestyle: 17
'''simple docstring'''

import torch

from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device

from .test_schedulers import SchedulerCommonTest


class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
style_context_codestyle: 17
label: 1
'''simple docstring'''

from __future__ import annotations

import random
import unittest

from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFTransfoXLForSequenceClassification,
        TFTransfoXLLMHeadModel,
        TFTransfoXLModel,
    )


class TFTransfoXLModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01

    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size,
            mem_len=self.mem_len,
            clamp_len=self.clamp_len,
            cutoffs=self.cutoffs,
            d_model=self.hidden_size,
            d_embed=self.d_embed,
            n_head=self.num_attention_heads,
            d_head=self.d_head,
            d_inner=self.d_inner,
            div_val=self.div_val,
            n_layer=self.num_hidden_layers,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.vocab_size - 1,
            init_range=self.init_range,
            num_labels=self.num_labels,
        )

        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)

    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1}
        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_1, "mems": mems_1, "labels": lm_labels}
        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict


@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_dataset_conversion(self):
        pass


@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
    @unittest.skip("Skip test until #12651 is resolved.")
    @slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
        # fmt: off
        input_ids = tf.convert_to_tensor([[3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0]], dtype=tf.int32)  # noqa: E231
        # fmt: on
        # In 1991 , the remains of Russian Tsar Nicholas II and his family
        # ( except for Alexei and Maria ) are discovered .
        # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
        # remainder of the story . 1883 Western Siberia ,
        # a young Grigori Rasputin is asked by his father and a group of men to perform magic .
        # Rasputin has a vision and denounces one of the men as a horse thief . Although his
        # father initially slaps him for making such an accusation , Rasputin watches as the
        # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
        # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
        # with people , even a bishop , begging for his blessing . <eod> </s> <eos>
        # fmt: off
        expected_output_ids = [3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0,3_3,1,1_8_5_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_8,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,0]  # noqa: E231
        # fmt: on
        # In 1991, the remains of Russian Tsar Nicholas II and his family (
        # except for Alexei and Maria ) are discovered. The voice of young son,
        # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
        # 1883 Western Siberia, a young Grigori Rasputin is asked by his father
        # and a group of men to perform magic. Rasputin has a vision and
        # denounces one of the men as a horse thief. Although his father initially
        # slaps him for making such an accusation, Rasputin watches as the man
        # is chased outside and beaten. Twenty years later, Rasputin sees a vision
        # of the Virgin Mary, prompting him to become a priest.
        # Rasputin quickly becomes famous, with people, even a bishop, begging for
        # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
        # Nicholas II and his family were discovered. The voice of <unk> young son,
        # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>

        output_ids = model.generate(input_ids, max_length=200, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
code_codestyle: 17
'''simple docstring'''

def binary_and(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
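Two worked examples of mine for the string-based AND above, checked by hand against the integer operator.

assert binary_and(25, 32) == "0b000000"  # 0b011001 & 0b100000 share no set bits
assert binary_and(37, 100) == "0b0100100"  # 37 & 100 == 36 == 0b100100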
style_context_codestyle: 17
label: 1
'''simple docstring'''

def kinetic_energy(mass: float, velocity: float) -> float:
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
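A worked number of mine for the formula above, E_k = ½·m·v²: a 10 kg mass at 5 m/s carries 0.5 · 10 · 25 = 125 J.

assert kinetic_energy(10, 5) == 125.0
assert kinetic_energy(10, -5) == 125.0  # speed is squared, so the sign is irrelevant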
code_codestyle: 17
'''simple docstring'''

from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int):
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0))) for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
):
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0):
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int):
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
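How the estimators above might be driven; the iteration counts are my own choice and the printed estimates vary from run to run.

pi_estimator(100_000)                     # prints an estimate near 3.14
area_under_line_estimator_check(100_000)  # exact area of y=x on [0, 1] is 0.5
pi_estimator_using_area_under_curve(100_000)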
'''simple docstring''' import re import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class A__ ( UpperCamelCase ): """simple docstring""" UpperCamelCase_ : int = ['''image_processor''', '''tokenizer'''] UpperCamelCase_ : int = '''AutoImageProcessor''' UpperCamelCase_ : str = '''AutoTokenizer''' def __init__( self : str , lowerCAmelCase__ : List[Any]=None , lowerCAmelCase__ : Tuple=None , **lowerCAmelCase__ : Tuple ) -> Tuple: """simple docstring""" _UpperCAmelCase : Union[str, Any] = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , lowerCAmelCase__ , ) _UpperCAmelCase : Tuple = kwargs.pop("feature_extractor" ) _UpperCAmelCase : List[Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : List[Any] = self.image_processor _UpperCAmelCase : Any = False def __call__( self : Union[str, Any] , *lowerCAmelCase__ : Union[str, Any] , **lowerCAmelCase__ : List[Any] ) -> int: """simple docstring""" if self._in_target_context_manager: return self.current_processor(*lowerCAmelCase__ , **lowerCAmelCase__ ) _UpperCAmelCase : Union[str, Any] = kwargs.pop("images" , lowerCAmelCase__ ) _UpperCAmelCase : Tuple = kwargs.pop("text" , lowerCAmelCase__ ) if len(lowerCAmelCase__ ) > 0: _UpperCAmelCase : Dict = args[0] _UpperCAmelCase : str = args[1:] if images is None and text is None: raise ValueError("You need to specify either an `images` or `text` input to process." ) if images is not None: _UpperCAmelCase : Optional[Any] = self.image_processor(lowerCAmelCase__ , *lowerCAmelCase__ , **lowerCAmelCase__ ) if text is not None: _UpperCAmelCase : List[str] = self.tokenizer(lowerCAmelCase__ , **lowerCAmelCase__ ) if text is None: return inputs elif images is None: return encodings else: _UpperCAmelCase : Optional[Any] = encodings["input_ids"] return inputs def _lowerCAmelCase ( self : str , *lowerCAmelCase__ : List[Any] , **lowerCAmelCase__ : int ) -> Optional[int]: """simple docstring""" return self.tokenizer.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__ ) def _lowerCAmelCase ( self : Union[str, Any] , *lowerCAmelCase__ : Optional[Any] , **lowerCAmelCase__ : Optional[Any] ) -> Any: """simple docstring""" return self.tokenizer.decode(*lowerCAmelCase__ , **lowerCAmelCase__ ) @contextmanager def _lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" warnings.warn( "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your " "labels by using the argument `text` of the regular `__call__` method (either in the same call as " "your images inputs, or in a separate call." 
) _UpperCAmelCase : Optional[Any] = True _UpperCAmelCase : List[Any] = self.tokenizer yield _UpperCAmelCase : Union[str, Any] = self.image_processor _UpperCAmelCase : int = False def _lowerCAmelCase ( self : List[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any]=False , lowerCAmelCase__ : List[Any]=None ) -> str: """simple docstring""" if added_vocab is None: _UpperCAmelCase : str = self.tokenizer.get_added_vocab() _UpperCAmelCase : int = {} while tokens: _UpperCAmelCase : Union[str, Any] = re.search(R"<s_(.*?)>" , lowerCAmelCase__ , re.IGNORECASE ) if start_token is None: break _UpperCAmelCase : Union[str, Any] = start_token.group(1 ) _UpperCAmelCase : Union[str, Any] = re.search(RF"""</s_{key}>""" , lowerCAmelCase__ , re.IGNORECASE ) _UpperCAmelCase : Tuple = start_token.group() if end_token is None: _UpperCAmelCase : Any = tokens.replace(lowerCAmelCase__ , "" ) else: _UpperCAmelCase : Any = end_token.group() _UpperCAmelCase : Tuple = re.escape(lowerCAmelCase__ ) _UpperCAmelCase : Any = re.escape(lowerCAmelCase__ ) _UpperCAmelCase : Optional[Any] = re.search(F"""{start_token_escaped}(.*?){end_token_escaped}""" , lowerCAmelCase__ , re.IGNORECASE ) if content is not None: _UpperCAmelCase : Union[str, Any] = content.group(1 ).strip() if r"<s_" in content and r"</s_" in content: # non-leaf node _UpperCAmelCase : Optional[Any] = self.tokenajson(lowerCAmelCase__ , is_inner_value=lowerCAmelCase__ , added_vocab=lowerCAmelCase__ ) if value: if len(lowerCAmelCase__ ) == 1: _UpperCAmelCase : str = value[0] _UpperCAmelCase : Tuple = value else: # leaf nodes _UpperCAmelCase : List[Any] = [] for leaf in content.split(R"<sep/>" ): _UpperCAmelCase : Union[str, Any] = leaf.strip() if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>": _UpperCAmelCase : Tuple = leaf[1:-2] # for categorical special tokens output[key].append(lowerCAmelCase__ ) if len(output[key] ) == 1: _UpperCAmelCase : Tuple = output[key][0] _UpperCAmelCase : Tuple = tokens[tokens.find(lowerCAmelCase__ ) + len(lowerCAmelCase__ ) :].strip() if tokens[:6] == r"<sep/>": # non-leaf nodes return [output] + self.tokenajson(tokens[6:] , is_inner_value=lowerCAmelCase__ , added_vocab=lowerCAmelCase__ ) if len(lowerCAmelCase__ ): return [output] if is_inner_value else output else: return [] if is_inner_value else {"text_sequence": tokens} @property def _lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , lowerCAmelCase__ , ) return self.image_processor_class @property def _lowerCAmelCase ( self : Dict ) -> Dict: """simple docstring""" warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , lowerCAmelCase__ , ) return self.image_processor
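# A sketch of what the tag parser above (Donut's token2json, spelled
# `tokenajson` in this file) produces for a typical decoder output; the tag
# names here are illustrative, not a fixed vocabulary:
#
#   tokens = "<s_menu><s_name>latte</s_name><s_price>4.50</s_price></s_menu>"
#   processor.tokenajson(tokens)
#   # -> {"menu": {"name": "latte", "price": "4.50"}}
#
# Leaf values are split on <sep/> into lists, and text outside any tag falls
# back to {"text_sequence": ...}.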
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
values = {
    0: "0", 1: "1", 2: "2", 3: "3",
    4: "4", 5: "5", 6: "6", 7: "7",
    8: "8", 9: "9", 10: "a", 11: "b",
    12: "c", 13: "d", 14: "e", 15: "f",
}


def decimal_to_hexadecimal(decimal: float) -> str:
    """Convert a whole-number decimal value to its hexadecimal string.

    >>> decimal_to_hexadecimal(255)
    '0xff'
    >>> decimal_to_hexadecimal(-256)
    '-0x100'
    """
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal


if __name__ == "__main__":
    import doctest

    doctest.testmod()
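# Cross-checking decimal_to_hexadecimal against Python's built-in hex();
# the two agree for every non-zero integer (the builtin also handles 0,
# which this implementation maps to the bare prefix "0x").
if __name__ == "__main__":
    for value in (1, 15, 16, 255, 4096, -256):
        assert decimal_to_hexadecimal(value) == hex(value), value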
def fizz_buzz(number: int, iterations: int) -> str:
    """Play FizzBuzz from ``number`` up to ``iterations`` (inclusive).

    >>> fizz_buzz(1, 7)
    '1 2 Fizz 4 Buzz Fizz 7 '
    """
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)
        # print(out)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
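# A short demo of fizz_buzz: the second argument is the last number played
# (inclusive), not a repetition count.
if __name__ == "__main__":
    print(fizz_buzz(1, 15))
    # -> '1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz '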
import os


def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal (/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j] * grid[i - 1][j + 1] * grid[i - 2][j + 2] * grid[i - 3][j + 3]
                )

            max_product = max(vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product
    return largest


def solution():
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))
    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    return largest_product(grid)


if __name__ == "__main__":
    print(solution())
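# largest_product on a small hand-checked grid: the only non-zero run of
# four in this 4x4 grid is the left-to-right diagonal 1 * 2 * 3 * 4 = 24.
if __name__ == "__main__":
    demo_grid = [
        [1, 0, 0, 0],
        [0, 2, 0, 0],
        [0, 0, 3, 0],
        [0, 0, 0, 4],
    ]
    print(largest_product(demo_grid))  # 24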
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional, Union import datasets import numpy as np import torch from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.trainer_utils import get_last_checkpoint from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.31.0') __a = logging.getLogger(__name__) @dataclass class A__ : """simple docstring""" UpperCamelCase_ : str = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) UpperCamelCase_ : Optional[str] = field( default=UpperCamelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) UpperCamelCase_ : Optional[str] = field( default=UpperCamelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) UpperCamelCase_ : Optional[str] = field( default=UpperCamelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) UpperCamelCase_ : bool = field( default=UpperCamelCase , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , ) UpperCamelCase_ : str = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) UpperCamelCase_ : bool = field( default=UpperCamelCase , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) @dataclass class A__ : """simple docstring""" UpperCamelCase_ : Optional[str] = field(default=UpperCamelCase , metadata={'''help''': '''The input training data file (a text file).'''} ) UpperCamelCase_ : Optional[str] = field( default=UpperCamelCase , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , ) UpperCamelCase_ : bool = field( default=UpperCamelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) UpperCamelCase_ : Optional[int] = field( default=UpperCamelCase , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , ) UpperCamelCase_ : Optional[int] = field( default=UpperCamelCase , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. If passed, sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) UpperCamelCase_ : bool = field( default=UpperCamelCase , metadata={ '''help''': ( '''Whether to pad all samples to the maximum sentence length. ''' '''If False, will pad the samples dynamically when batching to the maximum length in the batch. 
More ''' '''efficient on GPU but very bad for TPU.''' ) } , ) UpperCamelCase_ : Optional[int] = field( default=UpperCamelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) UpperCamelCase_ : Optional[int] = field( default=UpperCamelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def _lowerCAmelCase ( self : Any ) -> Any: """simple docstring""" if self.train_file is not None: _UpperCAmelCase : List[Any] = self.train_file.split("." )[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: _UpperCAmelCase : List[str] = self.validation_file.split("." )[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." @dataclass class A__ : """simple docstring""" UpperCamelCase_ : PreTrainedTokenizerBase UpperCamelCase_ : Union[bool, str, PaddingStrategy] = True UpperCamelCase_ : Optional[int] = None UpperCamelCase_ : Optional[int] = None def __call__( self : List[Any] , lowerCAmelCase__ : List[str] ) -> List[str]: """simple docstring""" _UpperCAmelCase : int = "label" if "label" in features[0].keys() else "labels" _UpperCAmelCase : Dict = [feature.pop(lowerCAmelCase__ ) for feature in features] _UpperCAmelCase : str = len(lowerCAmelCase__ ) _UpperCAmelCase : int = len(features[0]["input_ids"] ) _UpperCAmelCase : str = [ [{k: v[i] for k, v in feature.items()} for i in range(lowerCAmelCase__ )] for feature in features ] _UpperCAmelCase : List[str] = list(chain(*lowerCAmelCase__ ) ) _UpperCAmelCase : Any = self.tokenizer.pad( lowerCAmelCase__ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , ) # Un-flatten _UpperCAmelCase : Any = {k: v.view(lowerCAmelCase__ , lowerCAmelCase__ , -1 ) for k, v in batch.items()} # Add back labels _UpperCAmelCase : List[str] = torch.tensor(lowerCAmelCase__ , dtype=torch.intaa ) return batch def __UpperCAmelCase ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _UpperCAmelCase : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_swag", a_, a_ ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout )], ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() _UpperCAmelCase : Optional[int] = training_args.get_process_log_level() logger.setLevel(a_ ) datasets.utils.logging.set_verbosity(a_ ) transformers.utils.logging.set_verbosity(a_ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(f"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. _UpperCAmelCase : Any = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _UpperCAmelCase : Any = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. """ "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.train_file is not None or data_args.validation_file is not None: _UpperCAmelCase : Union[str, Any] = {} if data_args.train_file is not None: _UpperCAmelCase : str = data_args.train_file if data_args.validation_file is not None: _UpperCAmelCase : Optional[Any] = data_args.validation_file _UpperCAmelCase : Dict = data_args.train_file.split("." )[-1] _UpperCAmelCase : Optional[int] = load_dataset( a_, data_files=a_, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) else: # Downloading and loading the swag dataset from the hub. _UpperCAmelCase : Dict = load_dataset( "swag", "regular", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_UpperCAmelCase : Any = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) _UpperCAmelCase : Any = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) _UpperCAmelCase : str = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path ), config=a_, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) # When using your own dataset or a different dataset from swag, you will probably need to change this. _UpperCAmelCase : Optional[Any] = [f"""ending{i}""" for i in range(4 )] _UpperCAmelCase : List[Any] = "sent1" _UpperCAmelCase : Optional[int] = "sent2" if data_args.max_seq_length is None: _UpperCAmelCase : List[str] = tokenizer.model_max_length if max_seq_length > 1_024: logger.warning( "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value" " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can" " override this default with `--block_size xxx`." ) _UpperCAmelCase : Dict = 1_024 else: if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the""" f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" ) _UpperCAmelCase : Dict = min(data_args.max_seq_length, tokenizer.model_max_length ) # Preprocessing the datasets. 
def preprocess_function(a_: Union[str, Any] ): _UpperCAmelCase : Optional[int] = [[context] * 4 for context in examples[context_name]] _UpperCAmelCase : Tuple = examples[question_header_name] _UpperCAmelCase : Optional[Any] = [ [f"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(a_ ) ] # Flatten out _UpperCAmelCase : List[str] = list(chain(*a_ ) ) _UpperCAmelCase : Dict = list(chain(*a_ ) ) # Tokenize _UpperCAmelCase : List[Any] = tokenizer( a_, a_, truncation=a_, max_length=a_, padding="max_length" if data_args.pad_to_max_length else False, ) # Un-flatten return {k: [v[i : i + 4] for i in range(0, len(a_ ), 4 )] for k, v in tokenized_examples.items()} if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset" ) _UpperCAmelCase : int = raw_datasets["train"] if data_args.max_train_samples is not None: _UpperCAmelCase : Optional[Any] = min(len(a_ ), data_args.max_train_samples ) _UpperCAmelCase : List[Any] = train_dataset.select(range(a_ ) ) with training_args.main_process_first(desc="train dataset map pre-processing" ): _UpperCAmelCase : Union[str, Any] = train_dataset.map( a_, batched=a_, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, ) if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset" ) _UpperCAmelCase : Dict = raw_datasets["validation"] if data_args.max_eval_samples is not None: _UpperCAmelCase : int = min(len(a_ ), data_args.max_eval_samples ) _UpperCAmelCase : List[str] = eval_dataset.select(range(a_ ) ) with training_args.main_process_first(desc="validation dataset map pre-processing" ): _UpperCAmelCase : Optional[int] = eval_dataset.map( a_, batched=a_, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, ) # Data collator _UpperCAmelCase : Tuple = ( default_data_collator if data_args.pad_to_max_length else DataCollatorForMultipleChoice(tokenizer=a_, pad_to_multiple_of=8 if training_args.fpaa else None ) ) # Metric def compute_metrics(a_: Tuple ): _UpperCAmelCase , _UpperCAmelCase : Tuple = eval_predictions _UpperCAmelCase : Union[str, Any] = np.argmax(a_, axis=1 ) return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()} # Initialize our Trainer _UpperCAmelCase : Any = Trainer( model=a_, args=a_, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=a_, data_collator=a_, compute_metrics=a_, ) # Training if training_args.do_train: _UpperCAmelCase : Optional[Any] = None if training_args.resume_from_checkpoint is not None: _UpperCAmelCase : List[Any] = training_args.resume_from_checkpoint elif last_checkpoint is not None: _UpperCAmelCase : List[str] = last_checkpoint _UpperCAmelCase : Any = trainer.train(resume_from_checkpoint=a_ ) trainer.save_model() # Saves the tokenizer too for easy upload _UpperCAmelCase : str = train_result.metrics _UpperCAmelCase : List[str] = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(a_ ) ) _UpperCAmelCase : Union[str, Any] = min(a_, len(a_ ) ) trainer.log_metrics("train", a_ ) trainer.save_metrics("train", a_ ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***" ) _UpperCAmelCase : List[Any] = trainer.evaluate() _UpperCAmelCase : int = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(a_ ) 
_UpperCAmelCase : Tuple = min(a_, len(a_ ) ) trainer.log_metrics("eval", a_ ) trainer.save_metrics("eval", a_ ) _UpperCAmelCase : int = { "finetuned_from": model_args.model_name_or_path, "tasks": "multiple-choice", "dataset_tags": "swag", "dataset_args": "regular", "dataset": "SWAG", "language": "en", } if training_args.push_to_hub: trainer.push_to_hub(**a_ ) else: trainer.create_model_card(**a_ ) def __UpperCAmelCase ( a_: int ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
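# A typical launch for the multiple-choice script above, assuming it is saved
# as run_swag.py; the checkpoint and output directory are placeholders:
#
#   python run_swag.py \
#       --model_name_or_path bert-base-uncased \
#       --do_train --do_eval \
#       --per_device_train_batch_size 16 \
#       --learning_rate 5e-5 \
#       --num_train_epochs 3 \
#       --output_dir ./swag-out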
'''simple docstring''' import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class A__ ( UpperCamelCase ): """simple docstring""" UpperCamelCase_ : Optional[int] = (EulerDiscreteScheduler,) UpperCamelCase_ : Tuple = 10 def _lowerCAmelCase ( self : Dict , **lowerCAmelCase__ : Tuple ) -> Any: """simple docstring""" _UpperCAmelCase : str = { "num_train_timesteps": 1_1_0_0, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**lowerCAmelCase__ ) return config def _lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=lowerCAmelCase__ ) def _lowerCAmelCase ( self : Any ) -> List[str]: """simple docstring""" for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=lowerCAmelCase__ , beta_end=lowerCAmelCase__ ) def _lowerCAmelCase ( self : List[str] ) -> List[str]: """simple docstring""" for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=lowerCAmelCase__ ) def _lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowerCAmelCase__ ) def _lowerCAmelCase ( self : List[Any] ) -> List[Any]: """simple docstring""" _UpperCAmelCase : List[str] = self.scheduler_classes[0] _UpperCAmelCase : int = self.get_scheduler_config() _UpperCAmelCase : Optional[int] = scheduler_class(**lowerCAmelCase__ ) scheduler.set_timesteps(self.num_inference_steps ) _UpperCAmelCase : int = torch.manual_seed(0 ) _UpperCAmelCase : Any = self.dummy_model() _UpperCAmelCase : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma _UpperCAmelCase : List[Any] = sample.to(lowerCAmelCase__ ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase : List[str] = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : int = model(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : int = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ ) _UpperCAmelCase : Optional[int] = output.prev_sample _UpperCAmelCase : Optional[Any] = torch.sum(torch.abs(lowerCAmelCase__ ) ) _UpperCAmelCase : Tuple = torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_sum.item() - 10.0807 ) < 1e-2 assert abs(result_mean.item() - 0.0131 ) < 1e-3 def _lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" _UpperCAmelCase : Any = self.scheduler_classes[0] _UpperCAmelCase : List[Any] = self.get_scheduler_config(prediction_type="v_prediction" ) _UpperCAmelCase : Any = scheduler_class(**lowerCAmelCase__ ) scheduler.set_timesteps(self.num_inference_steps ) _UpperCAmelCase : str = torch.manual_seed(0 ) _UpperCAmelCase : Optional[Any] = self.dummy_model() _UpperCAmelCase : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma _UpperCAmelCase : Tuple = sample.to(lowerCAmelCase__ ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase : Union[str, Any] = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : int = model(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : Union[str, Any] = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ ) _UpperCAmelCase : Union[str, Any] = output.prev_sample 
_UpperCAmelCase : Tuple = torch.sum(torch.abs(lowerCAmelCase__ ) ) _UpperCAmelCase : Any = torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_sum.item() - 0.0002 ) < 1e-2 assert abs(result_mean.item() - 2.26_76e-06 ) < 1e-3 def _lowerCAmelCase ( self : Tuple ) -> str: """simple docstring""" _UpperCAmelCase : Optional[int] = self.scheduler_classes[0] _UpperCAmelCase : List[Any] = self.get_scheduler_config() _UpperCAmelCase : int = scheduler_class(**lowerCAmelCase__ ) scheduler.set_timesteps(self.num_inference_steps , device=lowerCAmelCase__ ) _UpperCAmelCase : Optional[int] = torch.manual_seed(0 ) _UpperCAmelCase : str = self.dummy_model() _UpperCAmelCase : Any = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() _UpperCAmelCase : str = sample.to(lowerCAmelCase__ ) for t in scheduler.timesteps: _UpperCAmelCase : List[str] = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : Any = model(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : Tuple = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ ) _UpperCAmelCase : int = output.prev_sample _UpperCAmelCase : List[Any] = torch.sum(torch.abs(lowerCAmelCase__ ) ) _UpperCAmelCase : str = torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_sum.item() - 10.0807 ) < 1e-2 assert abs(result_mean.item() - 0.0131 ) < 1e-3 def _lowerCAmelCase ( self : List[str] ) -> int: """simple docstring""" _UpperCAmelCase : List[Any] = self.scheduler_classes[0] _UpperCAmelCase : int = self.get_scheduler_config() _UpperCAmelCase : Union[str, Any] = scheduler_class(**lowerCAmelCase__ , use_karras_sigmas=lowerCAmelCase__ ) scheduler.set_timesteps(self.num_inference_steps , device=lowerCAmelCase__ ) _UpperCAmelCase : Optional[int] = torch.manual_seed(0 ) _UpperCAmelCase : List[str] = self.dummy_model() _UpperCAmelCase : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() _UpperCAmelCase : Optional[int] = sample.to(lowerCAmelCase__ ) for t in scheduler.timesteps: _UpperCAmelCase : List[Any] = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : str = model(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : Optional[Any] = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ ) _UpperCAmelCase : List[Any] = output.prev_sample _UpperCAmelCase : List[Any] = torch.sum(torch.abs(lowerCAmelCase__ ) ) _UpperCAmelCase : Optional[Any] = torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_sum.item() - 124.52_2994_9951_1719 ) < 1e-2 assert abs(result_mean.item() - 0.1_6213_9326_3339_9963 ) < 1e-3
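# The tests above all exercise the same denoising pattern; this is a minimal
# sketch of that loop outside the test harness. The zero "model output" is a
# stand-in for a real UNet call, and the sample shape is arbitrary.
import torch
from diffusers import EulerDiscreteScheduler

scheduler = EulerDiscreteScheduler(
    num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
)
scheduler.set_timesteps(10)

generator = torch.manual_seed(0)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # stand-in for unet(model_input, t)
    sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample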
import argparse

import pytorch_lightning as pl
import torch
from torch import nn

from transformers import LongformerForQuestionAnswering, LongformerModel


class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implemented only because Lightning requires a forward; never called here
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
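# Invocation sketch for the conversion script above, assuming it is saved as
# convert_longformer_original_pytorch_lightning_to_pytorch.py; the checkpoint
# path and output directory are placeholders:
#
#   python convert_longformer_original_pytorch_lightning_to_pytorch.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./checkpoints/qa.ckpt \
#       --pytorch_dump_folder_path ./longformer-base-4096-finetuned-qa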
'''simple docstring''' import argparse import logging from collections import namedtuple import torch from model_bertabs import BertAbsSummarizer from models.model_builder import AbsSummarizer # The authors' implementation from transformers import BertTokenizer logging.basicConfig(level=logging.INFO) __a = logging.getLogger(__name__) __a = 'Hello world! cécé herlolip' __a = namedtuple( 'BertAbsConfig', [ 'temp_dir', 'large', 'use_bert_emb', 'finetune_bert', 'encoder', 'share_emb', 'max_pos', 'enc_layers', 'enc_hidden_size', 'enc_heads', 'enc_ff_size', 'enc_dropout', 'dec_layers', 'dec_hidden_size', 'dec_heads', 'dec_ff_size', 'dec_dropout', ], ) def __UpperCAmelCase ( a_: Optional[int], a_: str ): _UpperCAmelCase : Optional[Any] = BertAbsConfig( temp_dir=".", finetune_bert=a_, large=a_, share_emb=a_, use_bert_emb=a_, encoder="bert", max_pos=512, enc_layers=6, enc_hidden_size=512, enc_heads=8, enc_ff_size=512, enc_dropout=0.2, dec_layers=6, dec_hidden_size=768, dec_heads=8, dec_ff_size=2_048, dec_dropout=0.2, ) _UpperCAmelCase : Dict = torch.load(a_, lambda a_, a_ : storage ) _UpperCAmelCase : Tuple = AbsSummarizer(a_, torch.device("cpu" ), a_ ) original.eval() _UpperCAmelCase : Dict = BertAbsSummarizer(a_, torch.device("cpu" ) ) new_model.eval() # ------------------- # Convert the weights # ------------------- logging.info("convert the model" ) new_model.bert.load_state_dict(original.bert.state_dict() ) new_model.decoder.load_state_dict(original.decoder.state_dict() ) new_model.generator.load_state_dict(original.generator.state_dict() ) # ---------------------------------- # Make sure the outpus are identical # ---------------------------------- logging.info("Make sure that the models' outputs are identical" ) _UpperCAmelCase : Dict = BertTokenizer.from_pretrained("bert-base-uncased" ) # prepare the model inputs _UpperCAmelCase : List[Any] = tokenizer.encode("This is sample éàalj'-." ) encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(a_ )) ) _UpperCAmelCase : Union[str, Any] = torch.tensor(a_ ).unsqueeze(0 ) _UpperCAmelCase : List[str] = tokenizer.encode("This is sample 3 éàalj'-." ) decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(a_ )) ) _UpperCAmelCase : Any = torch.tensor(a_ ).unsqueeze(0 ) # failsafe to make sure the weights reset does not affect the # loaded weights. assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0 # forward pass _UpperCAmelCase : Union[str, Any] = encoder_input_ids _UpperCAmelCase : Optional[int] = decoder_input_ids _UpperCAmelCase : List[str] = None _UpperCAmelCase : List[Any] = None _UpperCAmelCase : Dict = None _UpperCAmelCase : List[str] = None _UpperCAmelCase : Optional[int] = None # The original model does not apply the geneator layer immediatly but rather in # the beam search (where it combines softmax + linear layer). Since we already # apply the softmax in our generation process we only apply the linear layer here. 
# We make sure that the outputs of the full stack are identical _UpperCAmelCase : Tuple = original(a_, a_, a_, a_, a_, a_, a_ )[0] _UpperCAmelCase : str = original.generator(a_ ) _UpperCAmelCase : int = new_model( a_, a_, a_, a_, a_ )[0] _UpperCAmelCase : Union[str, Any] = new_model.generator(a_ ) _UpperCAmelCase : Tuple = torch.max(torch.abs(output_converted_model - output_original_model ) ).item() print("Maximum absolute difference beween weights: {:.2f}".format(a_ ) ) _UpperCAmelCase : List[Any] = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item() print("Maximum absolute difference beween weights: {:.2f}".format(a_ ) ) _UpperCAmelCase : Dict = torch.allclose(a_, a_, atol=1e-3 ) if are_identical: logging.info("all weights are equal up to 1e-3" ) else: raise ValueError("the weights are different. The new model is likely different from the original one." ) # The model has been saved with torch.save(model) and this is bound to the exact # directory structure. We save the state_dict instead. logging.info("saving the model's state dictionary" ) torch.save( new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin" ) if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument( '--bertabs_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.', ) __a = parser.parse_args() convert_bertabs_checkpoints( args.bertabs_checkpoint_path, args.pytorch_dump_folder_path, )
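# Invocation sketch for the BertAbs conversion above, assuming the script is
# saved as convert_bertabs_original_pytorch_checkpoint.py; both paths are
# placeholders for a locally downloaded original checkpoint and an output dir:
#
#   python convert_bertabs_original_pytorch_checkpoint.py \
#       --bertabs_checkpoint_path ./bertext_cnndm_transformer.pt \
#       --pytorch_dump_folder_path ./bertabs-finetuned-cnndm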
'''simple docstring''' from importlib import import_module from .logging import get_logger __a = get_logger(__name__) class A__ : """simple docstring""" def __init__( self : List[str] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[Any]=None ) -> Optional[int]: """simple docstring""" _UpperCAmelCase : Any = attrs or [] if module is not None: for key in module.__dict__: if key in attrs or not key.startswith("__" ): setattr(self , lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) _UpperCAmelCase : int = module._original_module if isinstance(lowerCAmelCase__ , _PatchedModuleObj ) else module class A__ : """simple docstring""" UpperCamelCase_ : Union[str, Any] = [] def __init__( self : int , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : str , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[int]=None ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase : List[Any] = obj _UpperCAmelCase : int = target _UpperCAmelCase : Optional[int] = new _UpperCAmelCase : Any = target.split("." )[0] _UpperCAmelCase : Optional[int] = {} _UpperCAmelCase : Dict = attrs or [] def __enter__( self : List[str] ) -> int: """simple docstring""" *_UpperCAmelCase , _UpperCAmelCase : List[str] = self.target.split("." ) # Patch modules: # it's used to patch attributes of submodules like "os.path.join"; # in this case we need to patch "os" and "os.path" for i in range(len(lowerCAmelCase__ ) ): try: _UpperCAmelCase : int = import_module(".".join(submodules[: i + 1] ) ) except ModuleNotFoundError: continue # We iterate over all the globals in self.obj in case we find "os" or "os.path" for attr in self.obj.__dir__(): _UpperCAmelCase : List[Any] = getattr(self.obj , lowerCAmelCase__ ) # We don't check for the name of the global, but rather if its value *is* "os" or "os.path". # This allows to patch renamed modules like "from os import path as ospath". if obj_attr is submodule or ( (isinstance(lowerCAmelCase__ , _PatchedModuleObj ) and obj_attr._original_module is submodule) ): _UpperCAmelCase : Tuple = obj_attr # patch at top level setattr(self.obj , lowerCAmelCase__ , _PatchedModuleObj(lowerCAmelCase__ , attrs=self.attrs ) ) _UpperCAmelCase : List[Any] = getattr(self.obj , lowerCAmelCase__ ) # construct lower levels patches for key in submodules[i + 1 :]: setattr(lowerCAmelCase__ , lowerCAmelCase__ , _PatchedModuleObj(getattr(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , attrs=self.attrs ) ) _UpperCAmelCase : Any = getattr(lowerCAmelCase__ , lowerCAmelCase__ ) # finally set the target attribute setattr(lowerCAmelCase__ , lowerCAmelCase__ , self.new ) # Patch attribute itself: # it's used for builtins like "open", # and also to patch "os.path.join" we may also need to patch "join" # itself if it was imported as "from os.path import join". if submodules: # if it's an attribute of a submodule like "os.path.join" try: _UpperCAmelCase : Dict = getattr(import_module(".".join(lowerCAmelCase__ ) ) , lowerCAmelCase__ ) except (AttributeError, ModuleNotFoundError): return # We iterate over all the globals in self.obj in case we find "os.path.join" for attr in self.obj.__dir__(): # We don't check for the name of the global, but rather if its value *is* "os.path.join". # This allows to patch renamed attributes like "from os.path import join as pjoin". 
if getattr(self.obj , lowerCAmelCase__ ) is attr_value: _UpperCAmelCase : Optional[Any] = getattr(self.obj , lowerCAmelCase__ ) setattr(self.obj , lowerCAmelCase__ , self.new ) elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open" _UpperCAmelCase : Dict = globals()["__builtins__"][target_attr] setattr(self.obj , lowerCAmelCase__ , self.new ) else: raise RuntimeError(F"""Tried to patch attribute {target_attr} instead of a submodule.""" ) def __exit__( self : Optional[int] , *lowerCAmelCase__ : List[str] ) -> Union[str, Any]: """simple docstring""" for attr in list(self.original ): setattr(self.obj , lowerCAmelCase__ , self.original.pop(lowerCAmelCase__ ) ) def _lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" self.__enter__() self._active_patches.append(self ) def _lowerCAmelCase ( self : Optional[int] ) -> Tuple: """simple docstring""" try: self._active_patches.remove(self ) except ValueError: # If the patch hasn't been started this will fail return None return self.__exit__()
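# A usage sketch for the patcher above (the second class corresponds to
# `patch_submodule` in the datasets codebase; the module and helper named
# here are hypothetical). It temporarily swaps os.path.join as seen from one
# module, then restores the original on exit:
#
#   def fake_join(*parts):
#       return "/".join(parts)
#
#   with patch_submodule(some_module, "os.path.join", fake_join):
#       some_module.build_path("a", "b")  # resolves through fake_join
#   # os.path.join is restored here; start()/stop() give the same behavior
#   # without a with-block.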
'''simple docstring''' import inspect from typing import Optional, Union import numpy as np import PIL import torch from torch.nn import functional as F from torchvision import transforms from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.utils import ( PIL_INTERPOLATION, randn_tensor, ) def __UpperCAmelCase ( a_: Tuple, a_: Union[str, Any], a_: List[Any] ): if isinstance(a_, torch.Tensor ): return image elif isinstance(a_, PIL.Image.Image ): _UpperCAmelCase : Any = [image] if isinstance(image[0], PIL.Image.Image ): _UpperCAmelCase : Union[str, Any] = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image] _UpperCAmelCase : List[Any] = np.concatenate(a_, axis=0 ) _UpperCAmelCase : Tuple = np.array(a_ ).astype(np.floataa ) / 2_55.0 _UpperCAmelCase : List[str] = image.transpose(0, 3, 1, 2 ) _UpperCAmelCase : List[str] = 2.0 * image - 1.0 _UpperCAmelCase : Optional[int] = torch.from_numpy(a_ ) elif isinstance(image[0], torch.Tensor ): _UpperCAmelCase : int = torch.cat(a_, dim=0 ) return image def __UpperCAmelCase ( a_: List[str], a_: Any, a_: Union[str, Any], a_: List[Any]=0.99_95 ): if not isinstance(a_, np.ndarray ): _UpperCAmelCase : List[Any] = True _UpperCAmelCase : List[str] = va.device _UpperCAmelCase : Dict = va.cpu().numpy() _UpperCAmelCase : Optional[Any] = va.cpu().numpy() _UpperCAmelCase : Any = np.sum(va * va / (np.linalg.norm(a_ ) * np.linalg.norm(a_ )) ) if np.abs(a_ ) > DOT_THRESHOLD: _UpperCAmelCase : Dict = (1 - t) * va + t * va else: _UpperCAmelCase : Any = np.arccos(a_ ) _UpperCAmelCase : Optional[Any] = np.sin(a_ ) _UpperCAmelCase : List[Any] = theta_a * t _UpperCAmelCase : int = np.sin(a_ ) _UpperCAmelCase : Tuple = np.sin(theta_a - theta_t ) / sin_theta_a _UpperCAmelCase : str = sin_theta_t / sin_theta_a _UpperCAmelCase : int = sa * va + sa * va if inputs_are_torch: _UpperCAmelCase : Optional[Any] = torch.from_numpy(a_ ).to(a_ ) return va def __UpperCAmelCase ( a_: List[Any], a_: List[Any] ): _UpperCAmelCase : List[str] = F.normalize(a_, dim=-1 ) _UpperCAmelCase : Dict = F.normalize(a_, dim=-1 ) return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 ) def __UpperCAmelCase ( a_: str, a_: str ): for param in model.parameters(): _UpperCAmelCase : Any = value class A__ ( UpperCamelCase ): """simple docstring""" def __init__( self : Optional[Any] , lowerCAmelCase__ : AutoencoderKL , lowerCAmelCase__ : CLIPTextModel , lowerCAmelCase__ : CLIPModel , lowerCAmelCase__ : CLIPTokenizer , lowerCAmelCase__ : UNetaDConditionModel , lowerCAmelCase__ : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , lowerCAmelCase__ : CLIPFeatureExtractor , lowerCAmelCase__ : Dict=None , lowerCAmelCase__ : int=None , lowerCAmelCase__ : Any=None , ) -> int: """simple docstring""" super().__init__() self.register_modules( vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , clip_model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , coca_model=lowerCAmelCase__ , coca_tokenizer=lowerCAmelCase__ , coca_transform=lowerCAmelCase__ , ) _UpperCAmelCase : List[Any] = ( feature_extractor.size if 
isinstance(feature_extractor.size , lowerCAmelCase__ ) else feature_extractor.size["shortest_edge"] ) _UpperCAmelCase : Union[str, Any] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std ) set_requires_grad(self.text_encoder , lowerCAmelCase__ ) set_requires_grad(self.clip_model , lowerCAmelCase__ ) def _lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase__ : Optional[Union[str, int]] = "auto" ) -> Optional[int]: """simple docstring""" if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory _UpperCAmelCase : Optional[int] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(lowerCAmelCase__ ) def _lowerCAmelCase ( self : Tuple ) -> List[Any]: """simple docstring""" self.enable_attention_slicing(lowerCAmelCase__ ) def _lowerCAmelCase ( self : List[str] ) -> Union[str, Any]: """simple docstring""" set_requires_grad(self.vae , lowerCAmelCase__ ) def _lowerCAmelCase ( self : Dict ) -> Tuple: """simple docstring""" set_requires_grad(self.vae , lowerCAmelCase__ ) def _lowerCAmelCase ( self : Any ) -> List[str]: """simple docstring""" set_requires_grad(self.unet , lowerCAmelCase__ ) def _lowerCAmelCase ( self : Optional[int] ) -> Optional[int]: """simple docstring""" set_requires_grad(self.unet , lowerCAmelCase__ ) def _lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase : int = min(int(num_inference_steps * strength ) , lowerCAmelCase__ ) _UpperCAmelCase : Optional[int] = max(num_inference_steps - init_timestep , 0 ) _UpperCAmelCase : str = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def _lowerCAmelCase ( self : str , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[Any]=None ) -> str: """simple docstring""" if not isinstance(lowerCAmelCase__ , torch.Tensor ): raise ValueError(F"""`image` has to be of type `torch.Tensor` but is {type(lowerCAmelCase__ )}""" ) _UpperCAmelCase : Tuple = image.to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCAmelCase : List[Any] = [ self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowerCAmelCase__ ) ] _UpperCAmelCase : Optional[Any] = torch.cat(lowerCAmelCase__ , dim=0 ) else: _UpperCAmelCase : int = self.vae.encode(lowerCAmelCase__ ).latent_dist.sample(lowerCAmelCase__ ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor _UpperCAmelCase : Union[str, Any] = 0.1_8215 * init_latents _UpperCAmelCase : Any = init_latents.repeat_interleave(lowerCAmelCase__ , dim=0 ) _UpperCAmelCase : str = randn_tensor(init_latents.shape , generator=lowerCAmelCase__ , device=lowerCAmelCase__ , dtype=lowerCAmelCase__ ) # get latents _UpperCAmelCase : Dict = self.scheduler.add_noise(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : Union[str, Any] = init_latents return latents def _lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase__ : Dict ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase : Tuple = self.coca_transform(lowerCAmelCase__ ).unsqueeze(0 ) with torch.no_grad(), torch.cuda.amp.autocast(): _UpperCAmelCase : int = 
self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) ) _UpperCAmelCase : List[str] = self.coca_tokenizer.decode(generated[0].cpu().numpy() ) return generated.split("<end_of_text>" )[0].replace("<start_of_text>" , "" ).rstrip(" .," ) def _lowerCAmelCase ( self : str , lowerCAmelCase__ : str , lowerCAmelCase__ : Union[str, Any] ) -> List[str]: """simple docstring""" _UpperCAmelCase : Dict = self.feature_extractor.preprocess(lowerCAmelCase__ ) _UpperCAmelCase : List[str] = torch.from_numpy(clip_image_input["pixel_values"][0] ).unsqueeze(0 ).to(self.device ).half() _UpperCAmelCase : List[str] = self.clip_model.get_image_features(lowerCAmelCase__ ) _UpperCAmelCase : int = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=lowerCAmelCase__ ) _UpperCAmelCase : Optional[int] = image_embeddings_clip.repeat_interleave(lowerCAmelCase__ , dim=0 ) return image_embeddings_clip @torch.enable_grad() def _lowerCAmelCase ( self : str , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Union[str, Any] , ) -> str: """simple docstring""" _UpperCAmelCase : Union[str, Any] = latents.detach().requires_grad_() _UpperCAmelCase : Tuple = self.scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ ) # predict the noise residual _UpperCAmelCase : Union[str, Any] = self.unet(lowerCAmelCase__ , lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ ).sample if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ): _UpperCAmelCase : Union[str, Any] = self.scheduler.alphas_cumprod[timestep] _UpperCAmelCase : Tuple = 1 - alpha_prod_t # compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _UpperCAmelCase : int = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5 _UpperCAmelCase : Any = torch.sqrt(lowerCAmelCase__ ) _UpperCAmelCase : int = pred_original_sample * (fac) + latents * (1 - fac) elif isinstance(self.scheduler , lowerCAmelCase__ ): _UpperCAmelCase : Dict = self.scheduler.sigmas[index] _UpperCAmelCase : Optional[Any] = latents - sigma * noise_pred else: raise ValueError(F"""scheduler type {type(self.scheduler )} not supported""" ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor _UpperCAmelCase : Any = 1 / 0.1_8215 * sample _UpperCAmelCase : Any = self.vae.decode(lowerCAmelCase__ ).sample _UpperCAmelCase : str = (image / 2 + 0.5).clamp(0 , 1 ) _UpperCAmelCase : List[str] = transforms.Resize(self.feature_extractor_size )(lowerCAmelCase__ ) _UpperCAmelCase : List[Any] = self.normalize(lowerCAmelCase__ ).to(latents.dtype ) _UpperCAmelCase : Union[str, Any] = self.clip_model.get_image_features(lowerCAmelCase__ ) _UpperCAmelCase : Dict = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=lowerCAmelCase__ ) _UpperCAmelCase : str = spherical_dist_loss(lowerCAmelCase__ , lowerCAmelCase__ ).mean() * clip_guidance_scale _UpperCAmelCase : Any = -torch.autograd.grad(lowerCAmelCase__ , lowerCAmelCase__ )[0] if isinstance(self.scheduler , lowerCAmelCase__ ): _UpperCAmelCase : Tuple = latents.detach() + grads * (sigma**2) _UpperCAmelCase : Any = noise_pred_original else: _UpperCAmelCase : Optional[Any] = noise_pred_original - torch.sqrt(lowerCAmelCase__ ) * grads return noise_pred, 
latents @torch.no_grad() def __call__( self : List[str] , lowerCAmelCase__ : Union[torch.FloatTensor, PIL.Image.Image] , lowerCAmelCase__ : Union[torch.FloatTensor, PIL.Image.Image] , lowerCAmelCase__ : Optional[str] = None , lowerCAmelCase__ : Optional[str] = None , lowerCAmelCase__ : Optional[int] = 5_1_2 , lowerCAmelCase__ : Optional[int] = 5_1_2 , lowerCAmelCase__ : float = 0.6 , lowerCAmelCase__ : Optional[int] = 5_0 , lowerCAmelCase__ : Optional[float] = 7.5 , lowerCAmelCase__ : Optional[int] = 1 , lowerCAmelCase__ : float = 0.0 , lowerCAmelCase__ : Optional[float] = 1_0_0 , lowerCAmelCase__ : Optional[torch.Generator] = None , lowerCAmelCase__ : Optional[str] = "pil" , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : float = 0.8 , lowerCAmelCase__ : float = 0.1 , lowerCAmelCase__ : float = 0.1 , ) -> List[str]: """simple docstring""" if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and len(lowerCAmelCase__ ) != batch_size: raise ValueError(F"""You have passed {batch_size} batch_size, but only {len(lowerCAmelCase__ )} generators.""" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" ) if isinstance(lowerCAmelCase__ , torch.Generator ) and batch_size > 1: _UpperCAmelCase : Optional[Any] = [generator] + [None] * (batch_size - 1) _UpperCAmelCase : Optional[Any] = [ ("model", self.coca_model is None), ("tokenizer", self.coca_tokenizer is None), ("transform", self.coca_transform is None), ] _UpperCAmelCase : Tuple = [x[0] for x in coca_is_none if x[1]] _UpperCAmelCase : int = ", ".join(lowerCAmelCase__ ) # generate prompts with coca model if prompt is None if content_prompt is None: if len(lowerCAmelCase__ ): raise ValueError( F"""Content prompt is None and CoCa [{coca_is_none_str}] is None.""" F"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" ) _UpperCAmelCase : str = self.get_image_description(lowerCAmelCase__ ) if style_prompt is None: if len(lowerCAmelCase__ ): raise ValueError( F"""Style prompt is None and CoCa [{coca_is_none_str}] is None.""" F""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" ) _UpperCAmelCase : str = self.get_image_description(lowerCAmelCase__ ) # get prompt text embeddings for content and style _UpperCAmelCase : Tuple = self.tokenizer( lowerCAmelCase__ , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=lowerCAmelCase__ , return_tensors="pt" , ) _UpperCAmelCase : Dict = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0] _UpperCAmelCase : Dict = self.tokenizer( lowerCAmelCase__ , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=lowerCAmelCase__ , return_tensors="pt" , ) _UpperCAmelCase : Optional[int] = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0] _UpperCAmelCase : Any = slerp(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # duplicate text embeddings for each generation per prompt _UpperCAmelCase : str = text_embeddings.repeat_interleave(lowerCAmelCase__ , dim=0 ) # set timesteps _UpperCAmelCase : List[Any] = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() ) _UpperCAmelCase : Dict = {} if accepts_offset: _UpperCAmelCase : List[str] = 1 self.scheduler.set_timesteps(lowerCAmelCase__ , **lowerCAmelCase__ ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand self.scheduler.timesteps.to(self.device ) 
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = self.get_timesteps(lowerCAmelCase__ , lowerCAmelCase__ , self.device ) _UpperCAmelCase : Optional[Any] = timesteps[:1].repeat(lowerCAmelCase__ ) # Preprocess image _UpperCAmelCase : Dict = preprocess(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : str = self.prepare_latents( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , text_embeddings.dtype , self.device , lowerCAmelCase__ ) _UpperCAmelCase : List[Any] = preprocess(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : List[str] = self.prepare_latents( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , text_embeddings.dtype , self.device , lowerCAmelCase__ ) _UpperCAmelCase : int = slerp(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) if clip_guidance_scale > 0: _UpperCAmelCase : int = self.get_clip_image_embeddings(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : Optional[Any] = self.get_clip_image_embeddings(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : Union[str, Any] = slerp( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. _UpperCAmelCase : Any = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: _UpperCAmelCase : Optional[int] = content_text_input.input_ids.shape[-1] _UpperCAmelCase : Any = self.tokenizer([""] , padding="max_length" , max_length=lowerCAmelCase__ , return_tensors="pt" ) _UpperCAmelCase : Optional[int] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt _UpperCAmelCase : Union[str, Any] = uncond_embeddings.repeat_interleave(lowerCAmelCase__ , dim=0 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes _UpperCAmelCase : Optional[int] = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. _UpperCAmelCase : str = (batch_size, self.unet.config.in_channels, height // 8, width // 8) _UpperCAmelCase : Union[str, Any] = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not work reproducibly on mps _UpperCAmelCase : Optional[Any] = torch.randn(lowerCAmelCase__ , generator=lowerCAmelCase__ , device="cpu" , dtype=lowerCAmelCase__ ).to( self.device ) else: _UpperCAmelCase : List[Any] = torch.randn(lowerCAmelCase__ , generator=lowerCAmelCase__ , device=self.device , dtype=lowerCAmelCase__ ) else: if latents.shape != latents_shape: raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" ) _UpperCAmelCase : List[str] = latents.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler _UpperCAmelCase : Any = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] _UpperCAmelCase : Optional[Any] = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) _UpperCAmelCase : List[Any] = {} if accepts_eta: _UpperCAmelCase : List[Any] = eta # check if the scheduler accepts generator _UpperCAmelCase : List[Any] = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) if accepts_generator: _UpperCAmelCase : Any = generator with self.progress_bar(total=lowerCAmelCase__ ): for i, t in enumerate(lowerCAmelCase__ ): # expand the latents if we are doing classifier free guidance _UpperCAmelCase : str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents _UpperCAmelCase : int = self.scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ ) # predict the noise residual _UpperCAmelCase : Tuple = self.unet(lowerCAmelCase__ , lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ ).sample # perform classifier free guidance if do_classifier_free_guidance: _UpperCAmelCase , _UpperCAmelCase : Dict = noise_pred.chunk(2 ) _UpperCAmelCase : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # perform clip guidance if clip_guidance_scale > 0: _UpperCAmelCase : Union[str, Any] = ( text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings ) _UpperCAmelCase , _UpperCAmelCase : Optional[int] = self.cond_fn( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) # compute the previous noisy sample x_t -> x_t-1 _UpperCAmelCase : List[Any] = self.scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor _UpperCAmelCase : Optional[Any] = 1 / 0.1_8215 * latents _UpperCAmelCase : Tuple = self.vae.decode(lowerCAmelCase__ ).sample _UpperCAmelCase : List[str] = (image / 2 + 0.5).clamp(0 , 1 ) _UpperCAmelCase : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": _UpperCAmelCase : Union[str, Any] = self.numpy_to_pil(lowerCAmelCase__ ) if not return_dict: return (image, None) return StableDiffusionPipelineOutput(images=lowerCAmelCase__ , nsfw_content_detected=lowerCAmelCase__ )
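# The pipeline above relies on two helpers that fall outside this excerpt:
# `spherical_dist_loss`, the CLIP guidance loss minimised in `cond_fn`, and
# `slerp`, used to blend the content/style text embeddings, latents and CLIP
# image embeddings. A minimal sketch of both follows, based on the standard
# definitions used by diffusers community pipelines; treat it as illustrative
# rather than the exact upstream code.
import torch
import torch.nn.functional as F


def spherical_dist_loss(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    # Project both embeddings onto the unit sphere, then penalise the squared
    # geodesic (great-circle) distance between them.
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def slerp(t: float, v0: torch.Tensor, v1: torch.Tensor, dot_threshold: float = 0.9995) -> torch.Tensor:
    # Spherical linear interpolation; fall back to plain lerp when the inputs
    # are nearly parallel and acos would be numerically unstable.
    dot = torch.sum(v0 * v1 / (v0.norm() * v1.norm()))
    if abs(dot) > dot_threshold:
        return (1 - t) * v0 + t * v1
    theta_0 = torch.acos(dot)
    theta_t = theta_0 * t
    return (torch.sin(theta_0 - theta_t) * v0 + torch.sin(theta_t) * v1) / torch.sin(theta_0)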
'''simple docstring''' import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal __a = datasets.utils.logging.get_logger(__name__) __a = ['names', 'prefix'] __a = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols'] __a = ['encoding_errors', 'on_bad_lines'] __a = ['date_format'] @dataclass class A__ ( datasets.BuilderConfig ): """simple docstring""" UpperCamelCase_ : str = "," UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : Optional[Union[int, List[int], str]] = "infer" UpperCamelCase_ : Optional[List[str]] = None UpperCamelCase_ : Optional[List[str]] = None UpperCamelCase_ : Optional[Union[int, str, List[int], List[str]]] = None UpperCamelCase_ : Optional[Union[List[int], List[str]]] = None UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : bool = True UpperCamelCase_ : Optional[Literal["c", "python", "pyarrow"]] = None UpperCamelCase_ : Dict[Union[int, str], Callable[[Any], Any]] = None UpperCamelCase_ : Optional[list] = None UpperCamelCase_ : Optional[list] = None UpperCamelCase_ : bool = False UpperCamelCase_ : Optional[Union[int, List[int]]] = None UpperCamelCase_ : Optional[int] = None UpperCamelCase_ : Optional[Union[str, List[str]]] = None UpperCamelCase_ : bool = True UpperCamelCase_ : bool = True UpperCamelCase_ : bool = False UpperCamelCase_ : bool = True UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : str = "." UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : str = '"' UpperCamelCase_ : int = 0 UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : bool = True UpperCamelCase_ : bool = True UpperCamelCase_ : int = 0 UpperCamelCase_ : bool = True UpperCamelCase_ : bool = False UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : int = 1_00_00 UpperCamelCase_ : Optional[datasets.Features] = None UpperCamelCase_ : Optional[str] = "strict" UpperCamelCase_ : Literal["error", "warn", "skip"] = "error" UpperCamelCase_ : Optional[str] = None def _lowerCAmelCase ( self : str ) -> Tuple: """simple docstring""" if self.delimiter is not None: _UpperCAmelCase : Any = self.delimiter if self.column_names is not None: _UpperCAmelCase : List[Any] = self.column_names @property def _lowerCAmelCase ( self : Optional[int] ) -> Optional[int]: """simple docstring""" _UpperCAmelCase : Dict = { "sep": self.sep, "header": self.header, "names": self.names, "index_col": self.index_col, "usecols": self.usecols, "prefix": self.prefix, "mangle_dupe_cols": self.mangle_dupe_cols, "engine": self.engine, "converters": self.converters, "true_values": self.true_values, "false_values": self.false_values, "skipinitialspace": self.skipinitialspace, "skiprows": self.skiprows, "nrows": self.nrows, "na_values": self.na_values, "keep_default_na": self.keep_default_na, "na_filter": self.na_filter, "verbose": self.verbose, "skip_blank_lines": self.skip_blank_lines, "thousands": self.thousands, "decimal": self.decimal, "lineterminator": self.lineterminator, "quotechar": self.quotechar, "quoting": self.quoting, "escapechar": self.escapechar, "comment": self.comment, "encoding": self.encoding, "dialect": self.dialect, "error_bad_lines": self.error_bad_lines, "warn_bad_lines": self.warn_bad_lines, 
"skipfooter": self.skipfooter, "doublequote": self.doublequote, "memory_map": self.memory_map, "float_precision": self.float_precision, "chunksize": self.chunksize, "encoding_errors": self.encoding_errors, "on_bad_lines": self.on_bad_lines, "date_format": self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , lowerCAmelCase__ ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class A__ ( datasets.ArrowBasedBuilder ): """simple docstring""" UpperCamelCase_ : int = CsvConfig def _lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" return datasets.DatasetInfo(features=self.config.features ) def _lowerCAmelCase ( self : Tuple , lowerCAmelCase__ : str ) -> List[str]: """simple docstring""" if not self.config.data_files: raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) _UpperCAmelCase : List[str] = dl_manager.download_and_extract(self.config.data_files ) if isinstance(lowerCAmelCase__ , (str, list, tuple) ): _UpperCAmelCase : int = data_files if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCAmelCase : Any = [files] _UpperCAmelCase : List[Any] = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )] _UpperCAmelCase : Optional[Any] = [] for split_name, files in data_files.items(): if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCAmelCase : str = [files] _UpperCAmelCase : Any = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files] splits.append(datasets.SplitGenerator(name=lowerCAmelCase__ , gen_kwargs={"files": files} ) ) return splits def _lowerCAmelCase ( self : List[Any] , lowerCAmelCase__ : pa.Table ) -> pa.Table: """simple docstring""" if self.config.features is not None: _UpperCAmelCase : Tuple = self.config.features.arrow_schema if all(not require_storage_cast(lowerCAmelCase__ ) for feature in self.config.features.values() ): # cheaper cast _UpperCAmelCase : Any = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=lowerCAmelCase__ ) else: # more expensive cast; allows str <-> int/float or str to Audio for example _UpperCAmelCase : int = table_cast(lowerCAmelCase__ , lowerCAmelCase__ ) return pa_table def _lowerCAmelCase ( self : Dict , lowerCAmelCase__ : Dict ) -> Dict: """simple docstring""" _UpperCAmelCase : int = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str _UpperCAmelCase : Optional[Any] = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(lowerCAmelCase__ ) else object for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() ) } if schema is not None else None ) for file_idx, file in 
enumerate(itertools.chain.from_iterable(lowerCAmelCase__ ) ): _UpperCAmelCase : Optional[Any] = pd.read_csv(lowerCAmelCase__ , iterator=lowerCAmelCase__ , dtype=lowerCAmelCase__ , **self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(lowerCAmelCase__ ): _UpperCAmelCase : Optional[int] = pa.Table.from_pandas(lowerCAmelCase__ ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(lowerCAmelCase__ ) except ValueError as e: logger.error(F"""Failed to read file '{file}' with error {type(lowerCAmelCase__ )}: {e}""" ) raise
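# The `_generate_tables` loop above is the heart of the CSV builder: pandas
# reads each file lazily in `chunksize`-row batches and every batch is turned
# into an Arrow table, so arbitrarily large CSVs never have to fit in memory
# at once. A standalone sketch of the same pattern (the file name, chunk size
# and dtype mapping below are made up for illustration):
import pandas as pd
import pyarrow as pa

reader = pd.read_csv("data.csv", iterator=True, chunksize=10_000, dtype={"id": "int64"})
for batch_idx, df in enumerate(reader):
    pa_table = pa.Table.from_pandas(df)  # convert the pandas chunk to Arrow
    print(batch_idx, pa_table.num_rows)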
'''simple docstring''' import unittest from transformers.testing_utils import require_bsa from transformers.utils import is_bsa_available from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin if is_bsa_available(): from transformers import MarkupLMFeatureExtractor class A__ ( unittest.TestCase ): """simple docstring""" def __init__( self : Any , lowerCAmelCase__ : int ) -> List[Any]: """simple docstring""" _UpperCAmelCase : Optional[int] = parent def _lowerCAmelCase ( self : int ) -> Tuple: """simple docstring""" return {} def __UpperCAmelCase ( ): _UpperCAmelCase : List[str] = "<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR=\"FFFFFF\">\n <HR>\n <a href=\"http://google.com\">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style=\"color:#0000FF\">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>" _UpperCAmelCase : Optional[Any] = "\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n " return [html_string_a, html_string_a] @require_bsa class A__ ( UpperCamelCase , unittest.TestCase ): """simple docstring""" UpperCamelCase_ : Any = MarkupLMFeatureExtractor if is_bsa_available() else None def _lowerCAmelCase ( self : Optional[Any] ) -> Dict: """simple docstring""" _UpperCAmelCase : List[Any] = MarkupLMFeatureExtractionTester(self ) @property def _lowerCAmelCase ( self : Tuple ) -> int: """simple docstring""" return self.feature_extract_tester.prepare_feat_extract_dict() def _lowerCAmelCase ( self : Any ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase : List[str] = self.feature_extraction_class() # Test not batched input _UpperCAmelCase : str = get_html_strings()[0] _UpperCAmelCase : Tuple = feature_extractor(lowerCAmelCase__ ) # fmt: off _UpperCAmelCase : Any = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]] _UpperCAmelCase : List[Any] = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]] # fmt: on self.assertEqual(encoding.nodes , lowerCAmelCase__ ) self.assertEqual(encoding.xpaths , lowerCAmelCase__ ) # Test batched _UpperCAmelCase : Tuple = get_html_strings() _UpperCAmelCase : Any = feature_extractor(lowerCAmelCase__ ) # fmt: off _UpperCAmelCase : int = expected_nodes + [["My First Heading", "My first paragraph."]] _UpperCAmelCase : Any = expected_xpaths + [["/html/body/h1", "/html/body/p"]] self.assertEqual(len(encoding.nodes ) , 2 ) self.assertEqual(len(encoding.xpaths ) , 2 ) self.assertEqual(encoding.nodes , lowerCAmelCase__ ) self.assertEqual(encoding.xpaths , lowerCAmelCase__ )
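# For reference, the feature extractor exercised above can be called directly:
# it walks the HTML tree with BeautifulSoup (hence the bs4 requirement) and
# returns every text node together with its xpath. The expected outputs below
# are the ones this test asserts for the second HTML string:
from transformers import MarkupLMFeatureExtractor

feature_extractor = MarkupLMFeatureExtractor()
html = "<html><body><h1>My First Heading</h1><p>My first paragraph.</p></body></html>"
encoding = feature_extractor(html)
print(encoding.nodes)   # [['My First Heading', 'My first paragraph.']]
print(encoding.xpaths)  # [['/html/body/h1', '/html/body/p']]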
'''simple docstring'''
from __future__ import annotations


def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum over subsets of non-adjacent elements of ``nums``.

    >>> maximum_non_adjacent_sum([1, 2, 4, 7])
    9
    >>> maximum_non_adjacent_sum([])
    0
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
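# Intuition for the recurrence above: `max_including` is the best sum that
# uses the current element, `max_excluding` the best sum that skips it.
# Traced on a small example:
#
#   nums = [1, 2, 4, 7]
#   start:  include=1, exclude=0
#   num=2:  include=0+2=2, exclude=max(1, 0)=1
#   num=4:  include=1+4=5, exclude=max(2, 1)=2
#   num=7:  include=2+7=9, exclude=max(5, 2)=5
#   answer: max(9, 5) = 9   (elements 2 and 7)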
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __a = { 'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['MobileViTFeatureExtractor'] __a = ['MobileViTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'MobileViTForImageClassification', 'MobileViTForSemanticSegmentation', 'MobileViTModel', 'MobileViTPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFMobileViTForImageClassification', 'TFMobileViTForSemanticSegmentation', 'TFMobileViTModel', 'TFMobileViTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilevit import MobileViTFeatureExtractor from .image_processing_mobilevit import MobileViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilevit import ( MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel, MobileViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilevit import ( TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileViTForImageClassification, TFMobileViTForSemanticSegmentation, TFMobileViTModel, TFMobileViTPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
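# The `_LazyModule` assignment above defers every heavy import (torch,
# TensorFlow, vision dependencies) until an attribute is first touched. A
# simplified sketch of the mechanism -- the real transformers implementation
# additionally handles module specs, `__dir__`, pickling and error messages:
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr: str):
        if attr in self._class_to_module:
            # Import the submodule on first access, then pull the symbol off it.
            module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
            return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")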
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() __a = logging.get_logger(__name__) def __UpperCAmelCase ( a_: List[str] ): _UpperCAmelCase : Union[str, Any] = OrderedDict() for key, value in state_dict.items(): if key.startswith("module.encoder" ): _UpperCAmelCase : Optional[int] = key.replace("module.encoder", "glpn.encoder" ) if key.startswith("module.decoder" ): _UpperCAmelCase : List[Any] = key.replace("module.decoder", "decoder.stages" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 _UpperCAmelCase : int = key[key.find("patch_embed" ) + len("patch_embed" )] _UpperCAmelCase : Union[str, Any] = key.replace(f"""patch_embed{idx}""", f"""patch_embeddings.{int(a_ )-1}""" ) if "norm" in key: _UpperCAmelCase : Union[str, Any] = key.replace("norm", "layer_norm" ) if "glpn.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 _UpperCAmelCase : str = key[key.find("glpn.encoder.layer_norm" ) + len("glpn.encoder.layer_norm" )] _UpperCAmelCase : Optional[Any] = key.replace(f"""layer_norm{idx}""", f"""layer_norm.{int(a_ )-1}""" ) if "layer_norm1" in key: _UpperCAmelCase : Union[str, Any] = key.replace("layer_norm1", "layer_norm_1" ) if "layer_norm2" in key: _UpperCAmelCase : List[Any] = key.replace("layer_norm2", "layer_norm_2" ) if "block" in key: # replace for example block1 by block.0 _UpperCAmelCase : Optional[Any] = key[key.find("block" ) + len("block" )] _UpperCAmelCase : List[str] = key.replace(f"""block{idx}""", f"""block.{int(a_ )-1}""" ) if "attn.q" in key: _UpperCAmelCase : Optional[int] = key.replace("attn.q", "attention.self.query" ) if "attn.proj" in key: _UpperCAmelCase : List[str] = key.replace("attn.proj", "attention.output.dense" ) if "attn" in key: _UpperCAmelCase : Dict = key.replace("attn", "attention.self" ) if "fc1" in key: _UpperCAmelCase : List[Any] = key.replace("fc1", "dense1" ) if "fc2" in key: _UpperCAmelCase : List[Any] = key.replace("fc2", "dense2" ) if "linear_pred" in key: _UpperCAmelCase : Any = key.replace("linear_pred", "classifier" ) if "linear_fuse" in key: _UpperCAmelCase : Dict = key.replace("linear_fuse.conv", "linear_fuse" ) _UpperCAmelCase : List[str] = key.replace("linear_fuse.bn", "batch_norm" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 _UpperCAmelCase : List[Any] = key[key.find("linear_c" ) + len("linear_c" )] _UpperCAmelCase : Tuple = key.replace(f"""linear_c{idx}""", f"""linear_c.{int(a_ )-1}""" ) if "bot_conv" in key: _UpperCAmelCase : Union[str, Any] = key.replace("bot_conv", "0.convolution" ) if "skip_conv1" in key: _UpperCAmelCase : Optional[int] = key.replace("skip_conv1", "1.convolution" ) if "skip_conv2" in key: _UpperCAmelCase : Optional[int] = key.replace("skip_conv2", "2.convolution" ) if "fusion1" in key: _UpperCAmelCase : List[str] = key.replace("fusion1", "1.fusion" ) if "fusion2" in key: _UpperCAmelCase : List[str] = key.replace("fusion2", "2.fusion" ) if "fusion3" in key: _UpperCAmelCase : Optional[Any] = key.replace("fusion3", "3.fusion" ) if "fusion" in key and "conv" in key: _UpperCAmelCase : List[Any] = key.replace("conv", "convolutional_layer" ) if key.startswith("module.last_layer_depth" ): _UpperCAmelCase : Optional[int] = key.replace("module.last_layer_depth", "head.head" ) _UpperCAmelCase : int = 
value return new_state_dict def __UpperCAmelCase ( a_: str, a_: List[Any] ): # for each of the encoder blocks: for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) _UpperCAmelCase : Tuple = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""" ) _UpperCAmelCase : Union[str, Any] = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""" ) # next, add keys and values (in that order) to the state dict _UpperCAmelCase : Optional[int] = kv_weight[ : config.hidden_sizes[i], : ] _UpperCAmelCase : Dict = kv_bias[: config.hidden_sizes[i]] _UpperCAmelCase : Optional[int] = kv_weight[ config.hidden_sizes[i] :, : ] _UpperCAmelCase : Optional[Any] = kv_bias[config.hidden_sizes[i] :] def __UpperCAmelCase ( ): _UpperCAmelCase : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg" _UpperCAmelCase : List[Any] = Image.open(requests.get(a_, stream=a_ ).raw ) return image @torch.no_grad() def __UpperCAmelCase ( a_: Tuple, a_: Any, a_: Optional[Any]=False, a_: List[Any]=None ): _UpperCAmelCase : Optional[Any] = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3] ) # load image processor (only resize + rescale) _UpperCAmelCase : Dict = GLPNImageProcessor() # prepare image _UpperCAmelCase : List[Any] = prepare_img() _UpperCAmelCase : Optional[int] = image_processor(images=a_, return_tensors="pt" ).pixel_values logger.info("Converting model..." ) # load original state dict _UpperCAmelCase : Union[str, Any] = torch.load(a_, map_location=torch.device("cpu" ) ) # rename keys _UpperCAmelCase : List[str] = rename_keys(a_ ) # key and value matrices need special treatment read_in_k_v(a_, a_ ) # create HuggingFace model and load state dict _UpperCAmelCase : List[str] = GLPNForDepthEstimation(a_ ) model.load_state_dict(a_ ) model.eval() # forward pass _UpperCAmelCase : Dict = model(a_ ) _UpperCAmelCase : List[str] = outputs.predicted_depth # verify output if model_name is not None: if "nyu" in model_name: _UpperCAmelCase : Optional[Any] = torch.tensor( [[4.41_47, 4.08_73, 4.06_73], [3.78_90, 3.28_81, 3.15_25], [3.76_74, 3.54_23, 3.49_13]] ) elif "kitti" in model_name: _UpperCAmelCase : Tuple = torch.tensor( [[3.42_91, 2.78_65, 2.51_51], [3.28_41, 2.70_21, 2.35_02], [3.11_47, 2.46_25, 2.24_81]] ) else: raise ValueError(f"""Unknown model name: {model_name}""" ) _UpperCAmelCase : Dict = torch.Size([1, 480, 640] ) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3], a_, atol=1e-4 ) print("Looks ok!" ) # finally, push to hub if required if push_to_hub: logger.info("Pushing model and image processor to the hub..." ) model.push_to_hub( repo_path_or_name=Path(a_, a_ ), organization="nielsr", commit_message="Add model", use_temp_dir=a_, ) image_processor.push_to_hub( repo_path_or_name=Path(a_, a_ ), organization="nielsr", commit_message="Add image processor", use_temp_dir=a_, ) if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.' 
) parser.add_argument( '--model_name', default='glpn-kitti', type=str, help='Name of the model in case you\'re pushing to the hub.', ) __a = parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
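# The subtlest step in the conversion above is `read_in_k_v`: the original
# GLPN checkpoint stores keys and values as a single fused projection, which
# must be split row-wise before it can populate separate HF key/value weights.
# A toy illustration with a made-up hidden size:
import torch

hidden_size = 4
kv_weight = torch.randn(2 * hidden_size, hidden_size)  # fused [K; V] projection
key_weight = kv_weight[:hidden_size, :]    # first half of the rows -> keys
value_weight = kv_weight[hidden_size:, :]  # second half of the rows -> values
assert torch.equal(torch.cat([key_weight, value_weight]), kv_weight)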
'''simple docstring''' from sklearn.metrics import mean_squared_error import datasets __a = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n' __a = '\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n' __a = '\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n "raw_values" : Returns a full set of errors in case of multioutput input.\n\n "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric("mse")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {\'mse\': 0.6123724356957945}\n\n If you\'re using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric("mse", "multilist")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mse\': array([0.41666667, 1. 
])}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A__ ( datasets.Metric ): """simple docstring""" def _lowerCAmelCase ( self : int ) -> Optional[Any]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[ "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html" ] , ) def _lowerCAmelCase ( self : List[Any] ) -> Dict: """simple docstring""" if self.config_name == "multilist": return { "predictions": datasets.Sequence(datasets.Value("float" ) ), "references": datasets.Sequence(datasets.Value("float" ) ), } else: return { "predictions": datasets.Value("float" ), "references": datasets.Value("float" ), } def _lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : List[str]="uniform_average" , lowerCAmelCase__ : Optional[Any]=True ) -> Optional[int]: """simple docstring""" _UpperCAmelCase : Dict = mean_squared_error( lowerCAmelCase__ , lowerCAmelCase__ , sample_weight=lowerCAmelCase__ , multioutput=lowerCAmelCase__ , squared=lowerCAmelCase__ ) return {"mse": mse}
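# Since the metric above is a thin wrapper around scikit-learn, the numbers
# from its docstring can be reproduced directly (sklearn takes y_true first;
# `squared=False` yields the RMSE and is deprecated in recent scikit-learn
# releases in favour of `root_mean_squared_error`):
from sklearn.metrics import mean_squared_error

references = [3, -0.5, 2, 7]
predictions = [2.5, 0.0, 2, 8]
print(mean_squared_error(references, predictions))                  # 0.375
print(mean_squared_error(references, predictions, squared=False))  # ~0.6124 (RMSE)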
'''simple docstring''' import contextlib import csv import json import os import sqlitea import tarfile import textwrap import zipfile import pyarrow as pa import pyarrow.parquet as pq import pytest import datasets import datasets.config @pytest.fixture(scope="session" ) def __UpperCAmelCase ( ): _UpperCAmelCase : Optional[Any] = 10 _UpperCAmelCase : int = datasets.Features( { "tokens": datasets.Sequence(datasets.Value("string" ) ), "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"] ) ), "answers": datasets.Sequence( { "text": datasets.Value("string" ), "answer_start": datasets.Value("int32" ), } ), "id": datasets.Value("int64" ), } ) _UpperCAmelCase : List[str] = datasets.Dataset.from_dict( { "tokens": [["foo"] * 5] * n, "labels": [[1] * 5] * n, "answers": [{"answer_start": [97], "text": ["1976"]}] * 10, "id": list(range(a_ ) ), }, features=a_, ) return dataset @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Optional[int], a_: Dict ): _UpperCAmelCase : Any = str(tmp_path_factory.mktemp("data" ) / "file.arrow" ) dataset.map(cache_file_name=a_ ) return filename # FILE_CONTENT + files __a = '\\n Text data.\n Second line of data.' @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Dict ): _UpperCAmelCase : Dict = tmp_path_factory.mktemp("data" ) / "file.txt" _UpperCAmelCase : Tuple = FILE_CONTENT with open(a_, "w" ) as f: f.write(a_ ) return filename @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Union[str, Any] ): import bza _UpperCAmelCase : str = tmp_path_factory.mktemp("data" ) / "file.txt.bz2" _UpperCAmelCase : Optional[int] = bytes(a_, "utf-8" ) with bza.open(a_, "wb" ) as f: f.write(a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Union[str, Any] ): import gzip _UpperCAmelCase : str = str(tmp_path_factory.mktemp("data" ) / "file.txt.gz" ) _UpperCAmelCase : Any = bytes(a_, "utf-8" ) with gzip.open(a_, "wb" ) as f: f.write(a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: str ): if datasets.config.LZ4_AVAILABLE: import lza.frame _UpperCAmelCase : Optional[int] = tmp_path_factory.mktemp("data" ) / "file.txt.lz4" _UpperCAmelCase : str = bytes(a_, "utf-8" ) with lza.frame.open(a_, "wb" ) as f: f.write(a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: int, a_: Any ): if datasets.config.PY7ZR_AVAILABLE: import pyazr _UpperCAmelCase : Any = tmp_path_factory.mktemp("data" ) / "file.txt.7z" with pyazr.SevenZipFile(a_, "w" ) as archive: archive.write(a_, arcname=os.path.basename(a_ ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Any, a_: List[str] ): import tarfile _UpperCAmelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "file.txt.tar" with tarfile.TarFile(a_, "w" ) as f: f.add(a_, arcname=os.path.basename(a_ ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: int ): import lzma _UpperCAmelCase : List[Any] = tmp_path_factory.mktemp("data" ) / "file.txt.xz" _UpperCAmelCase : List[str] = bytes(a_, "utf-8" ) with lzma.open(a_, "wb" ) as f: f.write(a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Dict, a_: Tuple ): import zipfile _UpperCAmelCase : Tuple = tmp_path_factory.mktemp("data" ) / "file.txt.zip" with zipfile.ZipFile(a_, "w" ) as f: f.write(a_, arcname=os.path.basename(a_ ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Optional[int] ): if datasets.config.ZSTANDARD_AVAILABLE: import zstandard as zstd _UpperCAmelCase : 
Optional[int] = tmp_path_factory.mktemp("data" ) / "file.txt.zst" _UpperCAmelCase : int = bytes(a_, "utf-8" ) with zstd.open(a_, "wb" ) as f: f.write(a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Optional[int] ): _UpperCAmelCase : List[str] = tmp_path_factory.mktemp("data" ) / "file.xml" _UpperCAmelCase : Tuple = textwrap.dedent( "\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>" ) with open(a_, "w" ) as f: f.write(a_ ) return filename __a = [ {'col_1': '0', 'col_2': 0, 'col_3': 0.0}, {'col_1': '1', 'col_2': 1, 'col_3': 1.0}, {'col_1': '2', 'col_2': 2, 'col_3': 2.0}, {'col_1': '3', 'col_2': 3, 'col_3': 3.0}, ] __a = [ {'col_1': '4', 'col_2': 4, 'col_3': 4.0}, {'col_1': '5', 'col_2': 5, 'col_3': 5.0}, ] __a = { 'col_1': ['0', '1', '2', '3'], 'col_2': [0, 1, 2, 3], 'col_3': [0.0, 1.0, 2.0, 3.0], } __a = [ {'col_3': 0.0, 'col_1': '0', 'col_2': 0}, {'col_3': 1.0, 'col_1': '1', 'col_2': 1}, ] __a = [ {'col_1': 's0', 'col_2': 0, 'col_3': 0.0}, {'col_1': 's1', 'col_2': 1, 'col_3': 1.0}, {'col_1': 's2', 'col_2': 2, 'col_3': 2.0}, {'col_1': 's3', 'col_2': 3, 'col_3': 3.0}, ] @pytest.fixture(scope="session" ) def __UpperCAmelCase ( ): return DATA_DICT_OF_LISTS @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Union[str, Any] ): _UpperCAmelCase : str = datasets.Dataset.from_dict(a_ ) _UpperCAmelCase : Optional[int] = str(tmp_path_factory.mktemp("data" ) / "dataset.arrow" ) dataset.map(cache_file_name=a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: str ): _UpperCAmelCase : int = str(tmp_path_factory.mktemp("data" ) / "dataset.sqlite" ) with contextlib.closing(sqlitea.connect(a_ ) ) as con: _UpperCAmelCase : List[Any] = con.cursor() cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)" ) for item in DATA: cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values() ) ) con.commit() return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Any ): _UpperCAmelCase : Dict = str(tmp_path_factory.mktemp("data" ) / "dataset.csv" ) with open(a_, "w", newline="" ) as f: _UpperCAmelCase : Dict = csv.DictWriter(a_, fieldnames=["col_1", "col_2", "col_3"] ) writer.writeheader() for item in DATA: writer.writerow(a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Union[str, Any] ): _UpperCAmelCase : Union[str, Any] = str(tmp_path_factory.mktemp("data" ) / "dataset2.csv" ) with open(a_, "w", newline="" ) as f: _UpperCAmelCase : Optional[int] = csv.DictWriter(a_, fieldnames=["col_1", "col_2", "col_3"] ) writer.writeheader() for item in DATA: writer.writerow(a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: str, a_: str ): import bza _UpperCAmelCase : str = tmp_path_factory.mktemp("data" ) / "dataset.csv.bz2" with open(a_, "rb" ) as f: _UpperCAmelCase : Any = 
f.read() # data = bytes(FILE_CONTENT, "utf-8") with bza.open(a_, "wb" ) as f: f.write(a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Optional[int], a_: Dict, a_: Optional[int] ): _UpperCAmelCase : List[Any] = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip" with zipfile.ZipFile(a_, "w" ) as f: f.write(a_, arcname=os.path.basename(a_ ) ) f.write(a_, arcname=os.path.basename(a_ ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: List[str], a_: Union[str, Any], a_: int ): _UpperCAmelCase : int = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip" with zipfile.ZipFile(a_, "w" ) as f: f.write(a_, arcname=os.path.basename(csv_path.replace(".csv", ".CSV" ) ) ) f.write(a_, arcname=os.path.basename(csva_path.replace(".csv", ".CSV" ) ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Any, a_: Union[str, Any], a_: Tuple ): _UpperCAmelCase : Any = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.csv.zip" with zipfile.ZipFile(a_, "w" ) as f: f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) ) ) f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Tuple ): _UpperCAmelCase : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.parquet" ) _UpperCAmelCase : Dict = pa.schema( { "col_1": pa.string(), "col_2": pa.intaa(), "col_3": pa.floataa(), } ) with open(a_, "wb" ) as f: _UpperCAmelCase : Tuple = pq.ParquetWriter(a_, schema=a_ ) _UpperCAmelCase : Tuple = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(a_ ) )] for k in DATA[0]}, schema=a_ ) writer.write_table(a_ ) writer.close() return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Any ): _UpperCAmelCase : Union[str, Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.json" ) _UpperCAmelCase : str = {"data": DATA} with open(a_, "w" ) as f: json.dump(a_, a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Union[str, Any] ): _UpperCAmelCase : Optional[int] = str(tmp_path_factory.mktemp("data" ) / "dataset.json" ) _UpperCAmelCase : Dict = {"data": DATA_DICT_OF_LISTS} with open(a_, "w" ) as f: json.dump(a_, a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: int ): _UpperCAmelCase : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl" ) with open(a_, "w" ) as f: for item in DATA: f.write(json.dumps(a_ ) + "\n" ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Tuple ): _UpperCAmelCase : Any = str(tmp_path_factory.mktemp("data" ) / "dataset2.jsonl" ) with open(a_, "w" ) as f: for item in DATA: f.write(json.dumps(a_ ) + "\n" ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Any ): _UpperCAmelCase : int = str(tmp_path_factory.mktemp("data" ) / "dataset_312.jsonl" ) with open(a_, "w" ) as f: for item in DATA_312: f.write(json.dumps(a_ ) + "\n" ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Optional[Any] ): _UpperCAmelCase : Optional[int] = str(tmp_path_factory.mktemp("data" ) / "dataset-str.jsonl" ) with open(a_, "w" ) as f: for item in DATA_STR: f.write(json.dumps(a_ ) + "\n" ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Union[str, Any], a_: Any ): import gzip _UpperCAmelCase : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.txt.gz" ) with open(a_, "rb" ) as orig_file: with gzip.open(a_, "wb" ) as zipped_file: zipped_file.writelines(a_ ) 
return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Optional[Any], a_: Tuple ): import gzip _UpperCAmelCase : List[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl.gz" ) with open(a_, "rb" ) as orig_file: with gzip.open(a_, "wb" ) as zipped_file: zipped_file.writelines(a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Dict, a_: List[Any], a_: Union[str, Any] ): _UpperCAmelCase : Tuple = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.zip" with zipfile.ZipFile(a_, "w" ) as f: f.write(a_, arcname=os.path.basename(a_ ) ) f.write(a_, arcname=os.path.basename(a_ ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Union[str, Any], a_: Optional[int], a_: Optional[Any], a_: Dict ): _UpperCAmelCase : Dict = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.zip" with zipfile.ZipFile(a_, "w" ) as f: f.write(a_, arcname=os.path.join("nested", os.path.basename(a_ ) ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: List[Any], a_: Optional[int], a_: List[str] ): _UpperCAmelCase : Dict = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.jsonl.zip" with zipfile.ZipFile(a_, "w" ) as f: f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) ) ) f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: List[Any], a_: List[Any], a_: str ): _UpperCAmelCase : Optional[Any] = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.tar" with tarfile.TarFile(a_, "w" ) as f: f.add(a_, arcname=os.path.basename(a_ ) ) f.add(a_, arcname=os.path.basename(a_ ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: List[str], a_: List[Any], a_: Tuple, a_: Dict ): _UpperCAmelCase : List[Any] = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.tar" with tarfile.TarFile(a_, "w" ) as f: f.add(a_, arcname=os.path.join("nested", os.path.basename(a_ ) ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: List[str] ): _UpperCAmelCase : List[str] = ["0", "1", "2", "3"] _UpperCAmelCase : Tuple = str(tmp_path_factory.mktemp("data" ) / "dataset.txt" ) with open(a_, "w" ) as f: for item in data: f.write(item + "\n" ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Union[str, Any] ): _UpperCAmelCase : Dict = ["0", "1", "2", "3"] _UpperCAmelCase : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset2.txt" ) with open(a_, "w" ) as f: for item in data: f.write(item + "\n" ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Any ): _UpperCAmelCase : int = ["0", "1", "2", "3"] _UpperCAmelCase : str = tmp_path_factory.mktemp("data" ) / "dataset.abc" with open(a_, "w" ) as f: for item in data: f.write(item + "\n" ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Optional[Any], a_: Any, a_: Union[str, Any] ): _UpperCAmelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "dataset.text.zip" with zipfile.ZipFile(a_, "w" ) as f: f.write(a_, arcname=os.path.basename(a_ ) ) f.write(a_, arcname=os.path.basename(a_ ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Optional[int], a_: List[Any], a_: List[Any] ): _UpperCAmelCase : List[Any] = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.text.zip" with zipfile.ZipFile(a_, "w" ) as f: f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) ) ) f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) 
) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Any, a_: str, a_: Tuple ): _UpperCAmelCase : List[Any] = tmp_path_factory.mktemp("data" ) / "dataset.ext.zip" with zipfile.ZipFile(a_, "w" ) as f: f.write(a_, arcname=os.path.basename("unsupported.ext" ) ) f.write(a_, arcname=os.path.basename("unsupported_2.ext" ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Optional[Any] ): _UpperCAmelCase : List[str] = "\n".join(["First", "Second\u2029with Unicode new line", "Third"] ) _UpperCAmelCase : str = str(tmp_path_factory.mktemp("data" ) / "dataset_with_unicode_new_lines.txt" ) with open(a_, "w", encoding="utf-8" ) as f: f.write(a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( ): return os.path.join("tests", "features", "data", "test_image_rgb.jpg" ) @pytest.fixture(scope="session" ) def __UpperCAmelCase ( ): return os.path.join("tests", "features", "data", "test_audio_44100.wav" ) @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: int, a_: Optional[Any] ): _UpperCAmelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "dataset.img.zip" with zipfile.ZipFile(a_, "w" ) as f: f.write(a_, arcname=os.path.basename(a_ ) ) f.write(a_, arcname=os.path.basename(a_ ).replace(".jpg", "2.jpg" ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Tuple ): _UpperCAmelCase : Optional[Any] = tmp_path_factory.mktemp("data_dir" ) (data_dir / "subdir").mkdir() with open(data_dir / "subdir" / "train.txt", "w" ) as f: f.write("foo\n" * 10 ) with open(data_dir / "subdir" / "test.txt", "w" ) as f: f.write("bar\n" * 10 ) # hidden file with open(data_dir / "subdir" / ".test.txt", "w" ) as f: f.write("bar\n" * 10 ) # hidden directory (data_dir / ".subdir").mkdir() with open(data_dir / ".subdir" / "train.txt", "w" ) as f: f.write("foo\n" * 10 ) with open(data_dir / ".subdir" / "test.txt", "w" ) as f: f.write("bar\n" * 10 ) return data_dir
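# All of the fixtures above are session-scoped, so each temporary file is
# built at most once per test run no matter how many tests request it. A
# hypothetical consuming test -- the fixture names in this dump are
# obfuscated, so `csv_path` (the upstream name of the plain-CSV fixture) is
# assumed here:
import pandas as pd


def test_csv_fixture(csv_path):
    df = pd.read_csv(csv_path)
    assert list(df.columns) == ["col_1", "col_2", "col_3"]
    assert len(df) == 4  # the four rows defined in DATA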
'''simple docstring''' from manim import * class A__ ( UpperCamelCase ): """simple docstring""" def _lowerCAmelCase ( self : Dict ) -> Dict: """simple docstring""" _UpperCAmelCase : List[Any] = Rectangle(height=0.5 , width=0.5 ) _UpperCAmelCase : Any = Rectangle(height=0.25 , width=0.25 ) _UpperCAmelCase : str = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) _UpperCAmelCase : List[Any] = [mem.copy() for i in range(6 )] _UpperCAmelCase : Tuple = [mem.copy() for i in range(6 )] _UpperCAmelCase : Optional[Any] = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 ) _UpperCAmelCase : Any = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 ) _UpperCAmelCase : int = VGroup(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 ) _UpperCAmelCase : Optional[Any] = Text("CPU" , font_size=2_4 ) _UpperCAmelCase : Tuple = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(lowerCAmelCase__ ) _UpperCAmelCase : Dict = [mem.copy() for i in range(4 )] _UpperCAmelCase : str = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 ) _UpperCAmelCase : List[Any] = Text("GPU" , font_size=2_4 ) _UpperCAmelCase : Tuple = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ ) gpu.move_to([-1, -1, 0] ) self.add(lowerCAmelCase__ ) _UpperCAmelCase : Dict = [mem.copy() for i in range(6 )] _UpperCAmelCase : Union[str, Any] = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 ) _UpperCAmelCase : Tuple = Text("Model" , font_size=2_4 ) _UpperCAmelCase : Dict = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ ) model.move_to([3, -1.0, 0] ) self.add(lowerCAmelCase__ ) _UpperCAmelCase : str = [] _UpperCAmelCase : int = [] _UpperCAmelCase : Dict = [] for i, rect in enumerate(lowerCAmelCase__ ): rect.set_stroke(lowerCAmelCase__ ) _UpperCAmelCase : Dict = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowerCAmelCase__ , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowerCAmelCase__ ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(model_cpu_arr[0] , direction=lowerCAmelCase__ , buff=0.0 ) else: cpu_target.next_to(model_cpu_arr[i - 1] , direction=lowerCAmelCase__ , buff=0.0 ) self.add(lowerCAmelCase__ ) model_cpu_arr.append(lowerCAmelCase__ ) self.add(*lowerCAmelCase__ , *lowerCAmelCase__ , *lowerCAmelCase__ ) _UpperCAmelCase : Optional[int] = [mem.copy() for i in range(6 )] _UpperCAmelCase : Any = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 ) _UpperCAmelCase : Optional[int] = Text("Loaded Checkpoint" , font_size=2_4 ) _UpperCAmelCase : List[str] = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ ) checkpoint.move_to([3, 0.5, 0] ) self.add(lowerCAmelCase__ ) _UpperCAmelCase : Optional[int] = [] _UpperCAmelCase : Any = [] for i, rect in enumerate(lowerCAmelCase__ ): _UpperCAmelCase : Any = fill.copy().set_fill(lowerCAmelCase__ , opacity=0.7 ) target.move_to(lowerCAmelCase__ ) ckpt_arr.append(lowerCAmelCase__ ) _UpperCAmelCase : Any = target.copy() if i < 5: cpu_target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.move_to(cpu_right_col_base[i - 5] ) ckpt_cpu_arr.append(lowerCAmelCase__ ) self.add(*lowerCAmelCase__ , *lowerCAmelCase__ ) 
_UpperCAmelCase : Any = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) _UpperCAmelCase : Any = MarkupText( F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=1_8 , ) key_text.move_to([-5, 2.4, 0] ) self.add(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : Any = MarkupText( F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=1_8 , ) blue_text.next_to(lowerCAmelCase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(lowerCAmelCase__ ) _UpperCAmelCase : List[str] = MarkupText( F"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=2_4 , ) step_a.move_to([2, 2, 0] ) _UpperCAmelCase : Dict = [meta_mem.copy() for i in range(6 )] _UpperCAmelCase : int = [meta_mem.copy() for i in range(6 )] _UpperCAmelCase : Dict = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 ) _UpperCAmelCase : str = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 ) _UpperCAmelCase : List[Any] = VGroup(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 ) _UpperCAmelCase : List[Any] = Text("Disk" , font_size=2_4 ) _UpperCAmelCase : Dict = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ ) disk.move_to([-4.0, -1.25, 0] ) self.play(Write(lowerCAmelCase__ , run_time=3 ) , Write(lowerCAmelCase__ , run_time=1 ) , Create(lowerCAmelCase__ , run_time=1 ) ) _UpperCAmelCase : List[str] = [] for i, rect in enumerate(lowerCAmelCase__ ): _UpperCAmelCase : List[Any] = rect.copy() target.generate_target() target.target.move_to(disk_left_col_base[i] ).scale(0.5 ) animations.append(MoveToTarget(lowerCAmelCase__ , run_time=1.5 ) ) self.play(*lowerCAmelCase__ ) self.play(FadeOut(lowerCAmelCase__ ) ) _UpperCAmelCase : Union[str, Any] = MarkupText(F"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=2_4 ) step_a.move_to([2, 2, 0] ) self.play(Write(lowerCAmelCase__ , run_time=3 ) ) self.play( FadeOut(lowerCAmelCase__ , lowerCAmelCase__ , *lowerCAmelCase__ , *lowerCAmelCase__ ) , ) self.wait()
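# The scene above animates how checkpoint weights move between disk, CPU and
# GPU during big-model loading. To render it, manim needs the scene class by
# name; since names in this dump are obfuscated, `CheckpointScene` below is a
# hypothetical stand-in for the class defined above:
from manim import tempconfig

with tempconfig({"quality": "low_quality", "preview": False}):
    scene = CheckpointScene()  # hypothetical name for the obfuscated scene class
    scene.render()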
'''simple docstring''' def __UpperCAmelCase ( a_: str ): return " ".join( "".join(word[::-1] ) if len(a_ ) > 4 else word for word in sentence.split() ) if __name__ == "__main__": import doctest doctest.testmod() print(reverse_long_words('Hey wollef sroirraw'))
17
'''simple docstring''' import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: __a = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class A__ ( unittest.TestCase ): """simple docstring""" def __init__( self : int , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[Any]=7 , lowerCAmelCase__ : int=3 , lowerCAmelCase__ : List[Any]=1_8 , lowerCAmelCase__ : str=3_0 , lowerCAmelCase__ : str=4_0_0 , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : str=True , lowerCAmelCase__ : List[Any]=None , ) -> List[Any]: """simple docstring""" _UpperCAmelCase : List[Any] = size if size is not None else {"height": 2_0, "width": 2_0} _UpperCAmelCase : Optional[Any] = parent _UpperCAmelCase : Tuple = batch_size _UpperCAmelCase : str = num_channels _UpperCAmelCase : Optional[Any] = image_size _UpperCAmelCase : Dict = min_resolution _UpperCAmelCase : str = max_resolution _UpperCAmelCase : List[Any] = size _UpperCAmelCase : Union[str, Any] = do_normalize _UpperCAmelCase : Optional[Any] = do_convert_rgb _UpperCAmelCase : str = [5_1_2, 1_0_2_4, 2_0_4_8, 4_0_9_6] _UpperCAmelCase : str = patch_size if patch_size is not None else {"height": 1_6, "width": 1_6} def _lowerCAmelCase ( self : List[str] ) -> int: """simple docstring""" return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def _lowerCAmelCase ( self : Any ) -> str: """simple docstring""" _UpperCAmelCase : Dict = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg" _UpperCAmelCase : Optional[Any] = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw ).convert("RGB" ) return raw_image @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , ) @require_torch @require_vision class A__ ( UpperCamelCase , unittest.TestCase ): """simple docstring""" UpperCamelCase_ : Any = PixaStructImageProcessor if is_vision_available() else None def _lowerCAmelCase ( self : Any ) -> Optional[int]: """simple docstring""" _UpperCAmelCase : Tuple = PixaStructImageProcessingTester(self ) @property def _lowerCAmelCase ( self : Tuple ) -> int: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def _lowerCAmelCase ( self : Any ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase : str = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCAmelCase__ , "do_normalize" ) ) self.assertTrue(hasattr(lowerCAmelCase__ , "do_convert_rgb" ) ) def _lowerCAmelCase ( self : Optional[Any] ) -> Dict: """simple docstring""" _UpperCAmelCase : Optional[Any] = self.image_processor_tester.prepare_dummy_image() _UpperCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict ) _UpperCAmelCase : str = 2_0_4_8 _UpperCAmelCase : Any = image_processor(lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__ ) self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) ) def _lowerCAmelCase ( self : Dict 
) -> int: """simple docstring""" _UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , Image.Image ) # Test not batched input _UpperCAmelCase : List[str] = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _UpperCAmelCase : Union[str, Any] = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _UpperCAmelCase : str = image_processor( lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def _lowerCAmelCase ( self : Optional[int] ) -> List[str]: """simple docstring""" _UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , Image.Image ) # Test not batched input _UpperCAmelCase : Union[str, Any] = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 _UpperCAmelCase : str = True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(lowerCAmelCase__ ): _UpperCAmelCase : str = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches _UpperCAmelCase : Any = "Hello" _UpperCAmelCase : Optional[int] = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__ , header_text=lowerCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _UpperCAmelCase : List[Any] = image_processor( lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__ , header_text=lowerCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def _lowerCAmelCase ( self : List[str] ) -> List[Any]: """simple docstring""" _UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , np.ndarray ) _UpperCAmelCase : Any = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _UpperCAmelCase : int = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _UpperCAmelCase : Union[str, Any] = 
image_processor( lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def _lowerCAmelCase ( self : int ) -> str: """simple docstring""" _UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , torch.Tensor ) # Test not batched input _UpperCAmelCase : List[str] = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _UpperCAmelCase : Union[str, Any] = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _UpperCAmelCase : str = image_processor( lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , ) @require_torch @require_vision class A__ ( UpperCamelCase , unittest.TestCase ): """simple docstring""" UpperCamelCase_ : List[Any] = PixaStructImageProcessor if is_vision_available() else None def _lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase : Any = PixaStructImageProcessingTester(self , num_channels=4 ) _UpperCAmelCase : List[Any] = 3 @property def _lowerCAmelCase ( self : Union[str, Any] ) -> Tuple: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def _lowerCAmelCase ( self : Dict ) -> Any: """simple docstring""" _UpperCAmelCase : str = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCAmelCase__ , "do_normalize" ) ) self.assertTrue(hasattr(lowerCAmelCase__ , "do_convert_rgb" ) ) def _lowerCAmelCase ( self : int ) -> List[str]: """simple docstring""" _UpperCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , Image.Image ) # Test not batched input _UpperCAmelCase : str = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _UpperCAmelCase : Any = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _UpperCAmelCase : Tuple = image_processor( lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
17
1
'''simple docstring''' from .constants import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_LAUNCH_PARAMS, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) from .dataclasses import ( BnbQuantizationConfig, ComputeEnvironment, CustomDtype, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, DynamoBackend, FPaRecipeKwargs, FullyShardedDataParallelPlugin, GradientAccumulationPlugin, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler, LoggerType, MegatronLMPlugin, PrecisionType, ProjectConfiguration, RNGType, SageMakerDistributedType, TensorInformation, TorchDynamoPlugin, ) from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env from .imports import ( get_ccl_version, is_abit_bnb_available, is_abit_bnb_available, is_aim_available, is_bfaa_available, is_bnb_available, is_botoa_available, is_ccl_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_fpa_available, is_ipex_available, is_megatron_lm_available, is_mlflow_available, is_mps_available, is_npu_available, is_rich_available, is_safetensors_available, is_sagemaker_available, is_tensorboard_available, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) from .modeling import ( check_device_map, check_tied_parameters_in_config, check_tied_parameters_on_same_device, compute_module_sizes, convert_file_size_to_int, dtype_byte_size, find_tied_parameters, get_balanced_memory, get_max_layer_size, get_max_memory, get_mixed_precision_context_manager, id_tensor_storage, infer_auto_device_map, load_checkpoint_in_model, load_offloaded_weights, load_state_dict, named_module_tensors, retie_parameters, set_module_tensor_to_device, shard_checkpoint, ) from .offload import ( OffloadedWeightsLoader, PrefixedDataset, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, save_offload_index, ) from .operations import ( broadcast, broadcast_object_list, concatenate, convert_outputs_to_fpaa, convert_to_fpaa, find_batch_size, find_device, gather, gather_object, get_data_structure, honor_type, initialize_tensors, is_namedtuple, is_tensor_information, is_torch_tensor, listify, pad_across_processes, recursively_apply, reduce, send_to_device, slice_tensors, ) from .versions import compare_versions, is_torch_version if is_deepspeed_available(): from .deepspeed import ( DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedSchedulerWrapper, DummyOptim, DummyScheduler, HfDeepSpeedConfig, ) from .bnb import has_abit_bnb_layers, load_and_quantize_model from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer from .launch import ( PrepareForLaunch, _filter_args, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, ) from .megatron_lm import ( AbstractTrainStep, BertTrainStep, GPTTrainStep, MegatronEngine, MegatronLMDummyDataLoader, MegatronLMDummyScheduler, MegatronLMOptimizerWrapper, MegatronLMSchedulerWrapper, TaTrainStep, avg_losses_across_data_parallel_group, gather_across_data_parallel_groups, ) from .megatron_lm import initialize as megatron_lm_initialize from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader from .megatron_lm import prepare_model as megatron_lm_prepare_model from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer from .megatron_lm import prepare_scheduler as 
megatron_lm_prepare_scheduler from .memory import find_executable_batch_size, release_memory from .other import ( extract_model_from_parallel, get_pretty_name, is_port_in_use, merge_dicts, patch_environment, save, wait_for_everyone, write_basic_config, ) from .random import set_seed, synchronize_rng_state, synchronize_rng_states from .torch_xla import install_xla from .tqdm import tqdm from .transformer_engine import convert_model, has_transformer_engine_layers
17
'''simple docstring''' from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging __a = logging.get_logger(__name__) __a = { 'huggingface/time-series-transformer-tourism-monthly': ( 'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json' ), # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer } class A__ ( UpperCamelCase ): """simple docstring""" UpperCamelCase_ : Tuple = '''time_series_transformer''' UpperCamelCase_ : Optional[Any] = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', '''num_hidden_layers''': '''encoder_layers''', } def __init__( self : Optional[int] , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : str = "student_t" , lowerCAmelCase__ : str = "nll" , lowerCAmelCase__ : int = 1 , lowerCAmelCase__ : List[int] = [1, 2, 3, 4, 5, 6, 7] , lowerCAmelCase__ : Optional[Union[str, bool]] = "mean" , lowerCAmelCase__ : int = 0 , lowerCAmelCase__ : int = 0 , lowerCAmelCase__ : int = 0 , lowerCAmelCase__ : int = 0 , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : int = 3_2 , lowerCAmelCase__ : int = 3_2 , lowerCAmelCase__ : int = 2 , lowerCAmelCase__ : int = 2 , lowerCAmelCase__ : int = 2 , lowerCAmelCase__ : int = 2 , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : str = "gelu" , lowerCAmelCase__ : int = 6_4 , lowerCAmelCase__ : float = 0.1 , lowerCAmelCase__ : float = 0.1 , lowerCAmelCase__ : float = 0.1 , lowerCAmelCase__ : float = 0.1 , lowerCAmelCase__ : float = 0.1 , lowerCAmelCase__ : int = 1_0_0 , lowerCAmelCase__ : float = 0.02 , lowerCAmelCase__ : Dict=True , **lowerCAmelCase__ : Tuple , ) -> Tuple: """simple docstring""" _UpperCAmelCase : Optional[int] = prediction_length _UpperCAmelCase : Optional[Any] = context_length or prediction_length _UpperCAmelCase : Optional[Any] = distribution_output _UpperCAmelCase : Union[str, Any] = loss _UpperCAmelCase : Dict = input_size _UpperCAmelCase : int = num_time_features _UpperCAmelCase : Any = lags_sequence _UpperCAmelCase : Dict = scaling _UpperCAmelCase : Tuple = num_dynamic_real_features _UpperCAmelCase : Dict = num_static_real_features _UpperCAmelCase : Union[str, Any] = num_static_categorical_features if cardinality and num_static_categorical_features > 0: if len(lowerCAmelCase__ ) != num_static_categorical_features: raise ValueError( "The cardinality should be a list of the same length as `num_static_categorical_features`" ) _UpperCAmelCase : Optional[int] = cardinality else: _UpperCAmelCase : Optional[Any] = [0] if embedding_dimension and num_static_categorical_features > 0: if len(lowerCAmelCase__ ) != num_static_categorical_features: raise ValueError( "The embedding dimension should be a list of the same length as `num_static_categorical_features`" ) _UpperCAmelCase : List[Any] = embedding_dimension else: _UpperCAmelCase : Optional[Any] = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality] _UpperCAmelCase : str = num_parallel_samples # Transformer architecture configuration _UpperCAmelCase : Union[str, Any] = input_size * len(lowerCAmelCase__ ) + self._number_of_features _UpperCAmelCase : str = d_model _UpperCAmelCase : Optional[Any] = encoder_attention_heads _UpperCAmelCase : Dict = decoder_attention_heads _UpperCAmelCase : List[Any] = encoder_ffn_dim _UpperCAmelCase : str = decoder_ffn_dim 
_UpperCAmelCase : Dict = encoder_layers _UpperCAmelCase : str = decoder_layers _UpperCAmelCase : Any = dropout _UpperCAmelCase : str = attention_dropout _UpperCAmelCase : List[Any] = activation_dropout _UpperCAmelCase : Dict = encoder_layerdrop _UpperCAmelCase : Any = decoder_layerdrop _UpperCAmelCase : Optional[Any] = activation_function _UpperCAmelCase : Tuple = init_std _UpperCAmelCase : List[str] = use_cache super().__init__(is_encoder_decoder=lowerCAmelCase__ , **lowerCAmelCase__ ) @property def _lowerCAmelCase ( self : str ) -> int: """simple docstring""" return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
17
1
'''simple docstring'''


def pancake_sort(arr: list) -> list:
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the first cur elements
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(pancake_sort(unsorted))
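A short trace of the pancake sort above on a three-element list (names as reconstructed): each pass flips the current maximum to the front, then flips it into its final slot.

# pancake_sort([3, 1, 2]):
# cur=3: max 3 is at index 0 -> flip prefix [3] (no-op) -> flip first 3 -> [2, 1, 3]
# cur=2: max 2 is at index 0 -> flip prefix [2] (no-op) -> flip first 2 -> [1, 2, 3]
print(pancake_sort([3, 1, 2]))  # [1, 2, 3]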
17
'''simple docstring'''
import baseaa


def baseaa_encode(string: str) -> bytes:
    return baseaa.baaencode(string.encode("utf-8"))


def baseaa_decode(encoded: bytes) -> str:
    return baseaa.baadecode(encoded).decode("utf-8")


if __name__ == "__main__":
    test = 'Hello World!'
    encoded = baseaa_encode(test)
    print(encoded)
    decoded = baseaa_decode(encoded)
    print(decoded)
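The obfuscated `baseaa`/`baaencode` names map to one of the stdlib `base64` codecs, but the digits are unrecoverable (base16, base32, base64, and base85 all collapse to the same pattern). Assuming `base64.b64encode` purely for illustration, the round trip looks like this:

import base64

encoded = base64.b64encode("Hello World!".encode("utf-8"))  # assumption: b64, could equally be b16/b32/b85
assert base64.b64decode(encoded).decode("utf-8") == "Hello World!"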
17
1
'''simple docstring'''
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(',')))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result


if __name__ == "__main__":
    print(solution())
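The comparison trick in `solution` avoids evaluating enormous powers: since log10 is monotonic, a**x > b**y exactly when x*log10(a) > y*log10(b). A tiny standalone check:

from math import log10

# 2**11 = 2048 vs 3**7 = 2187: the logs decide it without computing either power
assert 11 * log10(2) < 7 * log10(3)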
17
'''simple docstring''' from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class A__ : """simple docstring""" UpperCamelCase_ : Any = XGLMConfig UpperCamelCase_ : Union[str, Any] = {} UpperCamelCase_ : Dict = '''gelu''' def __init__( self : Optional[int] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[Any]=1_4 , lowerCAmelCase__ : Any=7 , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : List[str]=True , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : List[str]=9_9 , lowerCAmelCase__ : Any=3_2 , lowerCAmelCase__ : Optional[int]=2 , lowerCAmelCase__ : List[Any]=4 , lowerCAmelCase__ : Any=3_7 , lowerCAmelCase__ : List[Any]="gelu" , lowerCAmelCase__ : List[Any]=0.1 , lowerCAmelCase__ : Dict=0.1 , lowerCAmelCase__ : Optional[int]=5_1_2 , lowerCAmelCase__ : Optional[Any]=0.02 , ) -> int: """simple docstring""" _UpperCAmelCase : Optional[Any] = parent _UpperCAmelCase : str = batch_size _UpperCAmelCase : str = seq_length _UpperCAmelCase : int = is_training _UpperCAmelCase : List[Any] = use_input_mask _UpperCAmelCase : Optional[int] = use_labels _UpperCAmelCase : str = vocab_size _UpperCAmelCase : int = d_model _UpperCAmelCase : Tuple = num_hidden_layers _UpperCAmelCase : Tuple = num_attention_heads _UpperCAmelCase : Tuple = ffn_dim _UpperCAmelCase : Any = activation_function _UpperCAmelCase : Union[str, Any] = activation_dropout _UpperCAmelCase : Union[str, Any] = attention_dropout _UpperCAmelCase : Any = max_position_embeddings _UpperCAmelCase : int = initializer_range _UpperCAmelCase : Any = None _UpperCAmelCase : int = 0 _UpperCAmelCase : Union[str, Any] = 2 _UpperCAmelCase : Tuple = 1 def _lowerCAmelCase ( self : Optional[Any] ) -> List[Any]: """simple docstring""" return XGLMConfig.from_pretrained("facebook/xglm-564M" ) def _lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" _UpperCAmelCase : int = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 ) _UpperCAmelCase : Any = None if self.use_input_mask: _UpperCAmelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase : Optional[Any] = self.get_config() _UpperCAmelCase : Dict = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, ) def _lowerCAmelCase ( self : int ) -> Any: """simple docstring""" return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=lowerCAmelCase__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=lowerCAmelCase__ , ) def _lowerCAmelCase ( self : 
Tuple ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) : List[Any] = config_and_inputs _UpperCAmelCase : Optional[int] = { "input_ids": input_ids, "head_mask": head_mask, } return config, inputs_dict @require_tf class A__ ( UpperCamelCase , UpperCamelCase , unittest.TestCase ): """simple docstring""" UpperCamelCase_ : str = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () UpperCamelCase_ : Any = (TFXGLMForCausalLM,) if is_tf_available() else () UpperCamelCase_ : Tuple = ( {'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {} ) UpperCamelCase_ : Dict = False UpperCamelCase_ : List[Any] = False UpperCamelCase_ : Tuple = False def _lowerCAmelCase ( self : List[str] ) -> int: """simple docstring""" _UpperCAmelCase : Dict = TFXGLMModelTester(self ) _UpperCAmelCase : Dict = ConfigTester(self , config_class=lowerCAmelCase__ , n_embd=3_7 ) def _lowerCAmelCase ( self : List[str] ) -> Dict: """simple docstring""" self.config_tester.run_common_tests() @slow def _lowerCAmelCase ( self : List[str] ) -> Union[str, Any]: """simple docstring""" for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase : Optional[int] = TFXGLMModel.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor." ) def _lowerCAmelCase ( self : Union[str, Any] ) -> int: """simple docstring""" super().test_resize_token_embeddings() @require_tf class A__ ( unittest.TestCase ): """simple docstring""" @slow def _lowerCAmelCase ( self : Optional[int] , lowerCAmelCase__ : Optional[Any]=True ) -> Tuple: """simple docstring""" _UpperCAmelCase : Optional[int] = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" ) _UpperCAmelCase : Any = tf.convert_to_tensor([[2, 2_6_8, 9_8_6_5]] , dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off _UpperCAmelCase : int = [2, 2_6_8, 9_8_6_5, 6_7, 1_1, 1_9_8_8, 5_7_2_5_2, 9_8_6_5, 5, 9_8_4, 6_7, 1_9_8_8, 2_1_3_8_3_8, 1_6_5_8, 5_3, 7_0_4_4_6, 3_3, 6_6_5_7, 2_7_8, 1_5_8_1] # fmt: on _UpperCAmelCase : Dict = model.generate(lowerCAmelCase__ , do_sample=lowerCAmelCase__ , num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , lowerCAmelCase__ ) @slow def _lowerCAmelCase ( self : List[Any] ) -> str: """simple docstring""" _UpperCAmelCase : List[str] = XGLMTokenizer.from_pretrained("facebook/xglm-564M" ) _UpperCAmelCase : Optional[Any] = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" ) tf.random.set_seed(0 ) _UpperCAmelCase : Any = tokenizer("Today is a nice day and" , return_tensors="tf" ) _UpperCAmelCase : int = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(":/CPU:0" ): _UpperCAmelCase : List[Any] = model.generate(lowerCAmelCase__ , do_sample=lowerCAmelCase__ , seed=[7, 0] ) _UpperCAmelCase : Any = tokenizer.decode(output_ids[0] , skip_special_tokens=lowerCAmelCase__ ) _UpperCAmelCase : List[Any] = ( "Today is a nice day and warm evening here over Southern Alberta!! 
Today when they closed schools due" ) self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ ) @slow def _lowerCAmelCase ( self : Optional[int] ) -> str: """simple docstring""" _UpperCAmelCase : Optional[Any] = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" ) _UpperCAmelCase : List[Any] = XGLMTokenizer.from_pretrained("facebook/xglm-564M" ) _UpperCAmelCase : Optional[int] = "left" # use different length sentences to test batching _UpperCAmelCase : Tuple = [ "This is an extremelly long sentence that only exists to test the ability of the model to cope with " "left-padding, such as in batched generation. The output for the sequence below should be the same " "regardless of whether left padding is applied or not. When", "Hello, my dog is a little", ] _UpperCAmelCase : Dict = tokenizer(lowerCAmelCase__ , return_tensors="tf" , padding=lowerCAmelCase__ ) _UpperCAmelCase : List[Any] = inputs["input_ids"] _UpperCAmelCase : Dict = model.generate(input_ids=lowerCAmelCase__ , attention_mask=inputs["attention_mask"] , max_new_tokens=1_2 ) _UpperCAmelCase : int = tokenizer(sentences[0] , return_tensors="tf" ).input_ids _UpperCAmelCase : Dict = model.generate(input_ids=lowerCAmelCase__ , max_new_tokens=1_2 ) _UpperCAmelCase : Optional[int] = tokenizer(sentences[1] , return_tensors="tf" ).input_ids _UpperCAmelCase : List[Any] = model.generate(input_ids=lowerCAmelCase__ , max_new_tokens=1_2 ) _UpperCAmelCase : List[str] = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ ) _UpperCAmelCase : Tuple = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCAmelCase__ ) _UpperCAmelCase : List[str] = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCAmelCase__ ) _UpperCAmelCase : Union[str, Any] = [ "This is an extremelly long sentence that only exists to test the ability of the model to cope with " "left-padding, such as in batched generation. The output for the sequence below should be the same " "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be " "a single", "Hello, my dog is a little bit of a shy one, but he is very friendly", ] self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , [non_padded_sentence, padded_sentence] )
17
1
'''simple docstring'''
import baseaa


def baseaa_encode(string: str) -> bytes:
    return baseaa.baaencode(string.encode("utf-8"))


def baseaa_decode(encoded: bytes) -> str:
    return baseaa.baadecode(encoded).decode("utf-8")


if __name__ == "__main__":
    test = 'Hello World!'
    encoded = baseaa_encode(test)
    print(encoded)
    decoded = baseaa_decode(encoded)
    print(decoded)
17
'''simple docstring''' import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( "files", [ ["full:README.md", "dataset_infos.json"], ["empty:README.md", "dataset_infos.json"], ["dataset_infos.json"], ["full:README.md"], ], ) def __UpperCAmelCase ( a_: Tuple, a_: Any ): _UpperCAmelCase : Union[str, Any] = tmp_path_factory.mktemp("dset_infos_dir" ) if "full:README.md" in files: with open(dataset_infos_dir / "README.md", "w" ) as f: f.write("---\ndataset_info:\n dataset_size: 42\n---" ) if "empty:README.md" in files: with open(dataset_infos_dir / "README.md", "w" ) as f: f.write("" ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / "dataset_infos.json", "w" ) as f: f.write("{\"default\": {\"dataset_size\": 42}}" ) _UpperCAmelCase : List[str] = DatasetInfosDict.from_directory(a_ ) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( "dataset_info", [ DatasetInfo(), DatasetInfo( description="foo", features=Features({"a": Value("int32" )} ), builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train"}], download_size=42, ), ], ) def __UpperCAmelCase ( a_: Union[str, Any], a_: DatasetInfo ): _UpperCAmelCase : Tuple = str(a_ ) dataset_info.write_to_directory(a_ ) _UpperCAmelCase : Any = DatasetInfo.from_directory(a_ ) assert dataset_info == reloaded assert os.path.exists(os.path.join(a_, "dataset_info.json" ) ) def __UpperCAmelCase ( ): _UpperCAmelCase : Optional[int] = DatasetInfo( description="foo", citation="bar", homepage="https://foo.bar", license="CC0", features=Features({"a": Value("int32" )} ), post_processed={}, supervised_keys=(), task_templates=[], builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train", "num_examples": 42}], download_checksums={}, download_size=1_337, post_processing_size=442, dataset_size=1_234, size_in_bytes=1_337 + 442 + 1_234, ) _UpperCAmelCase : Tuple = dataset_info._to_yaml_dict() assert sorted(a_ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str) ) _UpperCAmelCase : List[Any] = yaml.safe_dump(a_ ) _UpperCAmelCase : Optional[int] = yaml.safe_load(a_ ) assert dataset_info_yaml_dict == reloaded def __UpperCAmelCase ( ): _UpperCAmelCase : str = DatasetInfo() _UpperCAmelCase : List[str] = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( "dataset_infos_dict", [ DatasetInfosDict(), DatasetInfosDict({"default": DatasetInfo()} ), DatasetInfosDict({"my_config_name": DatasetInfo()} ), DatasetInfosDict( { "default": DatasetInfo( description="foo", features=Features({"a": Value("int32" )} ), builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train"}], download_size=42, ) } ), DatasetInfosDict( { "v1": DatasetInfo(dataset_size=42 ), "v2": DatasetInfo(dataset_size=1_337 ), } ), ], ) def __UpperCAmelCase ( a_: str, a_: DatasetInfosDict ): _UpperCAmelCase : Union[str, Any] = str(a_ ) dataset_infos_dict.write_to_directory(a_ ) _UpperCAmelCase : Union[str, Any] = DatasetInfosDict.from_directory(a_ ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in dataset_infos_dict.items(): _UpperCAmelCase : Optional[int] = 
config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml _UpperCAmelCase : List[str] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(a_, "README.md" ) )
17
1
'''simple docstring''' import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase ): """simple docstring""" UpperCamelCase_ : List[str] = StableUnCLIPPipeline UpperCamelCase_ : List[str] = TEXT_TO_IMAGE_PARAMS UpperCamelCase_ : Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS UpperCamelCase_ : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS UpperCamelCase_ : str = TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false UpperCamelCase_ : Optional[Any] = False def _lowerCAmelCase ( self : int ) -> Dict: """simple docstring""" _UpperCAmelCase : int = 3_2 _UpperCAmelCase : List[str] = embedder_hidden_size # prior components torch.manual_seed(0 ) _UpperCAmelCase : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) torch.manual_seed(0 ) _UpperCAmelCase : Optional[int] = CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCAmelCase__ , projection_dim=lowerCAmelCase__ , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) ) torch.manual_seed(0 ) _UpperCAmelCase : List[Any] = PriorTransformer( num_attention_heads=2 , attention_head_dim=1_2 , embedding_dim=lowerCAmelCase__ , num_layers=1 , ) torch.manual_seed(0 ) _UpperCAmelCase : Tuple = DDPMScheduler( variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_0_0_0 , clip_sample=lowerCAmelCase__ , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , ) # regular denoising components torch.manual_seed(0 ) _UpperCAmelCase : int = StableUnCLIPImageNormalizer(embedding_dim=lowerCAmelCase__ ) _UpperCAmelCase : Tuple = DDPMScheduler(beta_schedule="squaredcos_cap_v2" ) torch.manual_seed(0 ) _UpperCAmelCase : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) torch.manual_seed(0 ) _UpperCAmelCase : str = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCAmelCase__ , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) ) torch.manual_seed(0 ) _UpperCAmelCase : List[str] = UNetaDConditionModel( sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(3_2, 6_4) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCAmelCase__ , layers_per_block=1 , upcast_attention=lowerCAmelCase__ , use_linear_projection=lowerCAmelCase__ , ) torch.manual_seed(0 ) _UpperCAmelCase : 
Optional[int] = DDIMScheduler( beta_schedule="scaled_linear" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=lowerCAmelCase__ , steps_offset=1 , ) torch.manual_seed(0 ) _UpperCAmelCase : Any = AutoencoderKL() _UpperCAmelCase : int = { # prior components "prior_tokenizer": prior_tokenizer, "prior_text_encoder": prior_text_encoder, "prior": prior, "prior_scheduler": prior_scheduler, # image noising components "image_normalizer": image_normalizer, "image_noising_scheduler": image_noising_scheduler, # regular denoising components "tokenizer": tokenizer, "text_encoder": text_encoder, "unet": unet, "scheduler": scheduler, "vae": vae, } return components def _lowerCAmelCase ( self : Dict , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Dict=0 ) -> Tuple: """simple docstring""" if str(lowerCAmelCase__ ).startswith("mps" ): _UpperCAmelCase : Union[str, Any] = torch.manual_seed(lowerCAmelCase__ ) else: _UpperCAmelCase : Optional[int] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ ) _UpperCAmelCase : List[str] = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "prior_num_inference_steps": 2, "output_type": "numpy", } return inputs def _lowerCAmelCase ( self : Any ) -> str: """simple docstring""" _UpperCAmelCase : Optional[Any] = torch_device == "cpu" self._test_attention_slicing_forward_pass(test_max_difference=lowerCAmelCase__ ) def _lowerCAmelCase ( self : List[Any] ) -> Optional[int]: """simple docstring""" _UpperCAmelCase : List[str] = torch_device in ["cpu", "mps"] self._test_inference_batch_single_identical(test_max_difference=lowerCAmelCase__ ) @slow @require_torch_gpu class A__ ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : Dict ) -> Optional[int]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCAmelCase ( self : Dict ) -> List[Any]: """simple docstring""" _UpperCAmelCase : Dict = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" ) _UpperCAmelCase : Tuple = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa ) pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() _UpperCAmelCase : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 ) _UpperCAmelCase : Tuple = pipe("anime turle" , generator=lowerCAmelCase__ , output_type="np" ) _UpperCAmelCase : str = output.images[0] assert image.shape == (7_6_8, 7_6_8, 3) assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ ) def _lowerCAmelCase ( self : Tuple ) -> List[str]: """simple docstring""" torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() _UpperCAmelCase : Tuple = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa ) _UpperCAmelCase : Dict = pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() _UpperCAmelCase : List[str] = pipe( "anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , ) _UpperCAmelCase : Optional[Any] = torch.cuda.max_memory_allocated() # make sure that less 
than 7 GB is allocated assert mem_bytes < 7 * 1_0**9
17
'''simple docstring'''
from math import factorial


def solution(num: int = 100) -> int:
    return sum(map(int, str(factorial(num))))


if __name__ == "__main__":
    print(solution(int(input('Enter the Number: ').strip())))
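A small worked check of the digit-sum-of-a-factorial above: 10! = 3628800, and 3+6+2+8+8+0+0 = 27.

from math import factorial

assert sum(map(int, str(factorial(10)))) == 27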
17
1
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import sys

import transformers


os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce TensorFlow's console output

print('Python version:', sys.version)
print('transformers version:', transformers.__version__)

try:
    import torch

    print('Torch version:', torch.__version__)
    print('Cuda available:', torch.cuda.is_available())
    print('Cuda version:', torch.version.cuda)
    print('CuDNN version:', torch.backends.cudnn.version())
    print('Number of GPUs available:', torch.cuda.device_count())
    print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
    print('Torch version:', None)

try:
    import deepspeed

    print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
    print('DeepSpeed version:', None)

try:
    import tensorflow as tf

    print('TensorFlow version:', tf.__version__)
    print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
    print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
    print('TensorFlow version:', None)
17
'''simple docstring'''
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    """simple docstring"""

    data: int
    next_node: Node | None


class SortedLinkedList:
    """simple docstring"""

    def __init__(self, ints: Iterable[int]) -> None:
        """simple docstring"""
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        """simple docstring"""
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        """simple docstring"""
        return sum(1 for _ in self)

    def __str__(self) -> str:
        """simple docstring"""
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
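Minimal usage of the list above (names as reconstructed): construction sorts the input by prepending in descending order, so iteration and `__str__` come out ascending.

sll = SortedLinkedList([3, 1, 2])
print(sll)       # 1 -> 2 -> 3
print(len(sll))  # 3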
17
1
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {'configuration_plbart': ['PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PLBartConfig']}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_plbart'] = ['PLBartTokenizer']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_plbart'] = [
        'PLBART_PRETRAINED_MODEL_ARCHIVE_LIST',
        'PLBartForCausalLM',
        'PLBartForConditionalGeneration',
        'PLBartForSequenceClassification',
        'PLBartModel',
        'PLBartPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_plbart import PLBartTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_plbart import (
            PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            PLBartForCausalLM,
            PLBartForConditionalGeneration,
            PLBartForSequenceClassification,
            PLBartModel,
            PLBartPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
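A sketch of what the lazy structure buys, assuming a transformers install that ships PLBart: importing the package is cheap, and `_LazyModule` only resolves the heavy submodule when one of its attributes is first touched.

from transformers import PLBartConfig  # resolved through the lazy module on first access

config = PLBartConfig()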
17
'''simple docstring'''


def bin_to_octal(bin_string: str) -> str:
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string


if __name__ == "__main__":
    from doctest import testmod

    testmod()
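Two quick checks of the converter above (name reconstructed as `bin_to_octal`): the input is left-padded to a multiple of three bits, then each triple becomes one octal digit.

assert bin_to_octal("111") == "7"
assert bin_to_octal("1010") == "12"  # padded to "001010" -> groups "001", "010"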
17
1
'''simple docstring'''


def bin_to_octal(bin_string: str) -> str:
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string


if __name__ == "__main__":
    from doctest import testmod

    testmod()
17
'''simple docstring'''
from datetime import datetime

import matplotlib.pyplot as plt
import torch


def freeze_module(module) -> None:
    for param in module.parameters():
        param.requires_grad = False


def get_device() -> str:
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image) -> None:
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp() -> str:
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
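A hypothetical usage sketch; the helper names here (`get_device`, `get_timestamp`) are reconstructions chosen for this edit, not confirmed originals.

device = get_device()
print(f"[{get_timestamp()}] using device: {device}")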
17
1
'''simple docstring'''
import unittest

import numpy as np


def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray | None = None,
) -> np.ndarray:
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement."
            )

    return mat_c - mat_b.T @ a_inv @ mat_b


class TestSchurComplement(unittest.TestCase):
    """simple docstring"""

    def test_schur_complement(self) -> None:
        """simple docstring"""
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        """simple docstring"""
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)

    def test_improper_b_c_dimensions(self) -> None:
        """simple docstring"""
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    unittest.main()
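The first test exercises the block-determinant identity det([[A, B], [B^T, C]]) = det(A) * det(S), where S is the Schur complement of A. A tiny standalone check (function name as reconstructed above):

a = np.array([[4.0, 0.0], [0.0, 4.0]])
b = np.array([[1.0], [1.0]])
c = np.array([[3.0]])
s = schur_complement(a, b, c)  # C - B^T A^{-1} B = 3 - 0.5 = [[2.5]]
x = np.block([[a, b], [b.T, c]])
assert np.isclose(np.linalg.det(x), np.linalg.det(a) * np.linalg.det(s))  # 40 == 16 * 2.5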
17
'''simple docstring''' import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class A__ ( UpperCamelCase ): """simple docstring""" UpperCamelCase_ : Optional[int] = (EulerDiscreteScheduler,) UpperCamelCase_ : Tuple = 10 def _lowerCAmelCase ( self : Dict , **lowerCAmelCase__ : Tuple ) -> Any: """simple docstring""" _UpperCAmelCase : str = { "num_train_timesteps": 1_1_0_0, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**lowerCAmelCase__ ) return config def _lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=lowerCAmelCase__ ) def _lowerCAmelCase ( self : Any ) -> List[str]: """simple docstring""" for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=lowerCAmelCase__ , beta_end=lowerCAmelCase__ ) def _lowerCAmelCase ( self : List[str] ) -> List[str]: """simple docstring""" for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=lowerCAmelCase__ ) def _lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowerCAmelCase__ ) def _lowerCAmelCase ( self : List[Any] ) -> List[Any]: """simple docstring""" _UpperCAmelCase : List[str] = self.scheduler_classes[0] _UpperCAmelCase : int = self.get_scheduler_config() _UpperCAmelCase : Optional[int] = scheduler_class(**lowerCAmelCase__ ) scheduler.set_timesteps(self.num_inference_steps ) _UpperCAmelCase : int = torch.manual_seed(0 ) _UpperCAmelCase : Any = self.dummy_model() _UpperCAmelCase : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma _UpperCAmelCase : List[Any] = sample.to(lowerCAmelCase__ ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase : List[str] = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : int = model(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : int = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ ) _UpperCAmelCase : Optional[int] = output.prev_sample _UpperCAmelCase : Optional[Any] = torch.sum(torch.abs(lowerCAmelCase__ ) ) _UpperCAmelCase : Tuple = torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_sum.item() - 10.0807 ) < 1e-2 assert abs(result_mean.item() - 0.0131 ) < 1e-3 def _lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" _UpperCAmelCase : Any = self.scheduler_classes[0] _UpperCAmelCase : List[Any] = self.get_scheduler_config(prediction_type="v_prediction" ) _UpperCAmelCase : Any = scheduler_class(**lowerCAmelCase__ ) scheduler.set_timesteps(self.num_inference_steps ) _UpperCAmelCase : str = torch.manual_seed(0 ) _UpperCAmelCase : Optional[Any] = self.dummy_model() _UpperCAmelCase : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma _UpperCAmelCase : Tuple = sample.to(lowerCAmelCase__ ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase : Union[str, Any] = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : int = model(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : Union[str, Any] = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ ) _UpperCAmelCase : Union[str, Any] = output.prev_sample 
_UpperCAmelCase : Tuple = torch.sum(torch.abs(lowerCAmelCase__ ) ) _UpperCAmelCase : Any = torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_sum.item() - 0.0002 ) < 1e-2 assert abs(result_mean.item() - 2.26_76e-06 ) < 1e-3 def _lowerCAmelCase ( self : Tuple ) -> str: """simple docstring""" _UpperCAmelCase : Optional[int] = self.scheduler_classes[0] _UpperCAmelCase : List[Any] = self.get_scheduler_config() _UpperCAmelCase : int = scheduler_class(**lowerCAmelCase__ ) scheduler.set_timesteps(self.num_inference_steps , device=lowerCAmelCase__ ) _UpperCAmelCase : Optional[int] = torch.manual_seed(0 ) _UpperCAmelCase : str = self.dummy_model() _UpperCAmelCase : Any = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() _UpperCAmelCase : str = sample.to(lowerCAmelCase__ ) for t in scheduler.timesteps: _UpperCAmelCase : List[str] = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : Any = model(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : Tuple = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ ) _UpperCAmelCase : int = output.prev_sample _UpperCAmelCase : List[Any] = torch.sum(torch.abs(lowerCAmelCase__ ) ) _UpperCAmelCase : str = torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_sum.item() - 10.0807 ) < 1e-2 assert abs(result_mean.item() - 0.0131 ) < 1e-3 def _lowerCAmelCase ( self : List[str] ) -> int: """simple docstring""" _UpperCAmelCase : List[Any] = self.scheduler_classes[0] _UpperCAmelCase : int = self.get_scheduler_config() _UpperCAmelCase : Union[str, Any] = scheduler_class(**lowerCAmelCase__ , use_karras_sigmas=lowerCAmelCase__ ) scheduler.set_timesteps(self.num_inference_steps , device=lowerCAmelCase__ ) _UpperCAmelCase : Optional[int] = torch.manual_seed(0 ) _UpperCAmelCase : List[str] = self.dummy_model() _UpperCAmelCase : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() _UpperCAmelCase : Optional[int] = sample.to(lowerCAmelCase__ ) for t in scheduler.timesteps: _UpperCAmelCase : List[Any] = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : str = model(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : Optional[Any] = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ ) _UpperCAmelCase : List[Any] = output.prev_sample _UpperCAmelCase : List[Any] = torch.sum(torch.abs(lowerCAmelCase__ ) ) _UpperCAmelCase : Optional[Any] = torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_sum.item() - 124.52_2994_9951_1719 ) < 1e-2 assert abs(result_mean.item() - 0.1_6213_9326_3339_9963 ) < 1e-3
17
1
'''simple docstring''' # Author: OMKAR PATHAK, Nwachukwu Chidiebere # Use a Python dictionary to construct the graph. from __future__ import annotations from pprint import pformat from typing import Generic, TypeVar __a = TypeVar('T') class A__ ( Generic[T] ): """simple docstring""" def __init__( self : Dict , lowerCAmelCase__ : bool = True ) -> None: """simple docstring""" _UpperCAmelCase : dict[T, list[T]] = {} # dictionary of lists _UpperCAmelCase : Dict = directed def _lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase__ : T , lowerCAmelCase__ : T ) -> GraphAdjacencyList[T]: """simple docstring""" if not self.directed: # For undirected graphs # if both source vertex and destination vertex are both present in the # adjacency list, add destination vertex to source vertex list of adjacent # vertices and add source vertex to destination vertex list of adjacent # vertices. if source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(lowerCAmelCase__ ) self.adj_list[destination_vertex].append(lowerCAmelCase__ ) # if only source vertex is present in adjacency list, add destination vertex # to source vertex list of adjacent vertices, then create a new vertex with # destination vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(lowerCAmelCase__ ) _UpperCAmelCase : Union[str, Any] = [source_vertex] # if only destination vertex is present in adjacency list, add source vertex # to destination vertex list of adjacent vertices, then create a new vertex # with source vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. elif destination_vertex in self.adj_list: self.adj_list[destination_vertex].append(lowerCAmelCase__ ) _UpperCAmelCase : List[Any] = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and assign a list # containing the destination vertex as it's first adjacent vertex also # create a new vertex with destination vertex as key and assign a list # containing the source vertex as it's first adjacent vertex. else: _UpperCAmelCase : List[Any] = [destination_vertex] _UpperCAmelCase : Tuple = [source_vertex] else: # For directed graphs # if both source vertex and destination vertex are present in adjacency # list, add destination vertex to source vertex list of adjacent vertices. if source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(lowerCAmelCase__ ) # if only source vertex is present in adjacency list, add destination # vertex to source vertex list of adjacent vertices and create a new vertex # with destination vertex as key, which has no adjacent vertex elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(lowerCAmelCase__ ) _UpperCAmelCase : Tuple = [] # if only destination vertex is present in adjacency list, create a new # vertex with source vertex as key and assign a list containing destination # vertex as first adjacent vertex elif destination_vertex in self.adj_list: _UpperCAmelCase : Optional[Any] = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and a list containing # destination vertex as it's first adjacent vertex. 
# Then create a new vertex # with destination vertex as key, which has no adjacent vertex else: _UpperCAmelCase : List[Any] = [destination_vertex] _UpperCAmelCase : List[str] = [] return self def __repr__( self : Tuple ) -> str: """simple docstring""" return pformat(self.adj_list )
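As a readable counterpoint to the four-way case analysis above, a hedged sketch of the same adjacency-list idea using dict.setdefault; the class and method names here are illustrative, not the ones defined above.

from pprint import pformat

class AdjacencyList:
    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict = {}
        self.directed = directed

    def add_edge(self, source, destination) -> "AdjacencyList":
        # setdefault creates the vertex entry on first sight, so the
        # present/absent combinations need no explicit branching
        self.adj_list.setdefault(source, []).append(destination)
        if self.directed:
            self.adj_list.setdefault(destination, [])
        else:
            self.adj_list.setdefault(destination, []).append(source)
        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)

graph = AdjacencyList(directed=False)
graph.add_edge(1, 2).add_edge(2, 3)
print(graph)  # {1: [2], 2: [1, 3], 3: [2]}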
17
'''simple docstring''' def __UpperCAmelCase ( a_: int, a_: int ): if a < 0 or b < 0: raise ValueError("the value of both inputs must be positive" ) _UpperCAmelCase : List[str] = str(bin(a_ ) )[2:] # remove the leading "0b" _UpperCAmelCase : Any = str(bin(a_ ) )[2:] # remove the leading "0b" _UpperCAmelCase : Dict = max(len(a_ ), len(a_ ) ) return "0b" + "".join( str(int(char_a == "1" and char_b == "1" ) ) for char_a, char_b in zip(a_binary.zfill(a_ ), b_binary.zfill(a_ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
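A quick cross-check sketch: Python's built-in & operator should agree with the character-wise implementation above. The helper name and the max(..., 1) width guard for zero inputs are my own additions.

def binary_and_builtin(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    width = max(a.bit_length(), b.bit_length(), 1)  # zfill width, at least one digit
    return "0b" + format(a & b, f"0{width}b")

assert binary_and_builtin(25, 32) == "0b000000"  # 11001 & 100000, padded to 6 digits
assert binary_and_builtin(37, 50) == "0b100000"  # 100101 & 110010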
17
1
'''simple docstring''' import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class A__ : """simple docstring""" def __init__( self : Any , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[str]=1_3 , lowerCAmelCase__ : int=7 , lowerCAmelCase__ : int=True , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : Any=False , lowerCAmelCase__ : Dict=True , lowerCAmelCase__ : Any=9_9 , lowerCAmelCase__ : Optional[int]=3_2 , lowerCAmelCase__ : List[Any]=5 , lowerCAmelCase__ : str=4 , lowerCAmelCase__ : Union[str, Any]=3_7 , lowerCAmelCase__ : Optional[int]="gelu" , lowerCAmelCase__ : List[Any]=0.1 , lowerCAmelCase__ : List[Any]=0.1 , lowerCAmelCase__ : Dict=5_1_2 , lowerCAmelCase__ : Optional[int]=1_6 , lowerCAmelCase__ : Tuple=2 , lowerCAmelCase__ : List[str]=0.02 , lowerCAmelCase__ : Any=3 , lowerCAmelCase__ : Dict=4 , lowerCAmelCase__ : Dict=None , ) -> Any: """simple docstring""" _UpperCAmelCase : int = parent _UpperCAmelCase : Union[str, Any] = batch_size _UpperCAmelCase : Optional[int] = seq_length _UpperCAmelCase : Union[str, Any] = is_training _UpperCAmelCase : Any = use_input_mask _UpperCAmelCase : List[str] = use_token_type_ids _UpperCAmelCase : Optional[Any] = use_labels _UpperCAmelCase : Tuple = vocab_size _UpperCAmelCase : List[str] = hidden_size _UpperCAmelCase : Union[str, Any] = num_hidden_layers _UpperCAmelCase : int = num_attention_heads _UpperCAmelCase : Dict = intermediate_size _UpperCAmelCase : Optional[int] = hidden_act _UpperCAmelCase : Tuple = hidden_dropout_prob _UpperCAmelCase : str = attention_probs_dropout_prob _UpperCAmelCase : List[Any] = max_position_embeddings _UpperCAmelCase : str = type_vocab_size _UpperCAmelCase : int = type_sequence_label_size _UpperCAmelCase : List[Any] = initializer_range _UpperCAmelCase : List[str] = num_labels _UpperCAmelCase : int = num_choices _UpperCAmelCase : str = scope def _lowerCAmelCase ( self : Union[str, Any] ) -> List[str]: """simple docstring""" _UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase : str = None if self.use_input_mask: _UpperCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase : Optional[int] = None if self.use_token_type_ids: _UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCAmelCase : List[Any] = None _UpperCAmelCase : Optional[Any] = None _UpperCAmelCase : Optional[int] = None if self.use_labels: _UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase : Tuple = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _lowerCAmelCase ( self : Optional[int] ) -> Dict: """simple docstring""" return LlamaConfig( 
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , ) def _lowerCAmelCase ( self : Optional[int] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Dict , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[Any] ) -> Tuple: """simple docstring""" _UpperCAmelCase : int = LlamaModel(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() _UpperCAmelCase : Optional[int] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ ) _UpperCAmelCase : Optional[Any] = model(lowerCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowerCAmelCase ( self : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : int , lowerCAmelCase__ : str , ) -> Optional[int]: """simple docstring""" _UpperCAmelCase : int = True _UpperCAmelCase : Union[str, Any] = LlamaModel(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() _UpperCAmelCase : List[Any] = model( lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , ) _UpperCAmelCase : Dict = model( lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , ) _UpperCAmelCase : Union[str, Any] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowerCAmelCase ( self : int , lowerCAmelCase__ : Any , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Any , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : str , ) -> Dict: """simple docstring""" _UpperCAmelCase : str = LlamaForCausalLM(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() _UpperCAmelCase : Tuple = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowerCAmelCase ( self : Any , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[Any] , ) -> List[str]: """simple docstring""" _UpperCAmelCase : Optional[int] = True _UpperCAmelCase : Dict = True _UpperCAmelCase : Union[str, Any] = LlamaForCausalLM(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() # first forward pass _UpperCAmelCase : List[Any] = model( lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , 
use_cache=lowerCAmelCase__ , ) _UpperCAmelCase : Tuple = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids _UpperCAmelCase : str = ids_tensor((self.batch_size, 3) , config.vocab_size ) _UpperCAmelCase : str = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and _UpperCAmelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 ) _UpperCAmelCase : List[str] = torch.cat([input_mask, next_mask] , dim=-1 ) _UpperCAmelCase : List[Any] = model( lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , )["hidden_states"][0] _UpperCAmelCase : Dict = model( lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , )["hidden_states"][0] # select random slice _UpperCAmelCase : int = ids_tensor((1,) , output_from_past.shape[-1] ).item() _UpperCAmelCase : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach() _UpperCAmelCase : List[Any] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-3 ) ) def _lowerCAmelCase ( self : int ) -> Dict: """simple docstring""" _UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) : int = config_and_inputs _UpperCAmelCase : int = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase ): """simple docstring""" UpperCamelCase_ : Tuple = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () UpperCamelCase_ : Any = (LlamaForCausalLM,) if is_torch_available() else () UpperCamelCase_ : Dict = ( { '''feature-extraction''': LlamaModel, '''text-classification''': LlamaForSequenceClassification, '''text-generation''': LlamaForCausalLM, '''zero-shot''': LlamaForSequenceClassification, } if is_torch_available() else {} ) UpperCamelCase_ : Any = False UpperCamelCase_ : Optional[int] = False def _lowerCAmelCase ( self : Optional[int] ) -> Any: """simple docstring""" _UpperCAmelCase : Any = LlamaModelTester(self ) _UpperCAmelCase : Tuple = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=3_7 ) def _lowerCAmelCase ( self : int ) -> Optional[Any]: """simple docstring""" self.config_tester.run_common_tests() def _lowerCAmelCase ( self : Union[str, Any] ) -> Any: """simple docstring""" _UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase__ ) def _lowerCAmelCase ( self : str ) -> str: """simple docstring""" _UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _UpperCAmelCase : Tuple = type self.model_tester.create_and_check_model(*lowerCAmelCase__ ) def _lowerCAmelCase ( self : List[Any] ) -> List[Any]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common() 
_UpperCAmelCase : Tuple = 3 _UpperCAmelCase : Union[str, Any] = input_dict["input_ids"] _UpperCAmelCase : Dict = input_ids.ne(1 ).to(lowerCAmelCase__ ) _UpperCAmelCase : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) _UpperCAmelCase : Tuple = LlamaForSequenceClassification(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() _UpperCAmelCase : Tuple = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def _lowerCAmelCase ( self : Any ) -> List[str]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase : List[Any] = 3 _UpperCAmelCase : str = "single_label_classification" _UpperCAmelCase : Optional[Any] = input_dict["input_ids"] _UpperCAmelCase : Optional[int] = input_ids.ne(1 ).to(lowerCAmelCase__ ) _UpperCAmelCase : int = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) _UpperCAmelCase : Tuple = LlamaForSequenceClassification(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() _UpperCAmelCase : List[str] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def _lowerCAmelCase ( self : str ) -> List[str]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase : List[str] = 3 _UpperCAmelCase : Union[str, Any] = "multi_label_classification" _UpperCAmelCase : str = input_dict["input_ids"] _UpperCAmelCase : List[str] = input_ids.ne(1 ).to(lowerCAmelCase__ ) _UpperCAmelCase : Union[str, Any] = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) _UpperCAmelCase : Union[str, Any] = LlamaForSequenceClassification(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() _UpperCAmelCase : List[Any] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip("LLaMA buffers include complex numbers, which breaks this test" ) def _lowerCAmelCase ( self : Any ) -> Union[str, Any]: """simple docstring""" pass @parameterized.expand([("linear",), ("dynamic",)] ) def _lowerCAmelCase ( self : str , lowerCAmelCase__ : List[str] ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase : Optional[int] = ids_tensor([1, 1_0] , config.vocab_size ) _UpperCAmelCase : str = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights _UpperCAmelCase : List[Any] = LlamaModel(lowerCAmelCase__ ) original_model.to(lowerCAmelCase__ ) original_model.eval() _UpperCAmelCase : Any = original_model(lowerCAmelCase__ ).last_hidden_state _UpperCAmelCase : Tuple = original_model(lowerCAmelCase__ ).last_hidden_state set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights _UpperCAmelCase : Union[str, Any] = {"type": scaling_type, "factor": 10.0} _UpperCAmelCase : Any = LlamaModel(lowerCAmelCase__ ) scaled_model.to(lowerCAmelCase__ ) 
scaled_model.eval() _UpperCAmelCase : Any = scaled_model(lowerCAmelCase__ ).last_hidden_state _UpperCAmelCase : List[Any] = scaled_model(lowerCAmelCase__ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-5 ) ) else: self.assertFalse(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-5 ) ) @require_torch class A__ ( unittest.TestCase ): """simple docstring""" @unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" ) @slow def _lowerCAmelCase ( self : int ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase : List[Any] = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8] _UpperCAmelCase : Dict = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf" , device_map="auto" ) _UpperCAmelCase : int = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 _UpperCAmelCase : Any = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] ) torch.testing.assert_close(out.mean(-1 ) , lowerCAmelCase__ , atol=1e-2 , rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off _UpperCAmelCase : List[str] = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] ) # fmt: on torch.testing.assert_close(out[0, 0, :3_0] , lowerCAmelCase__ , atol=1e-5 , rtol=1e-5 ) @unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" ) @slow def _lowerCAmelCase ( self : Union[str, Any] ) -> Any: """simple docstring""" _UpperCAmelCase : List[Any] = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8] _UpperCAmelCase : Tuple = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf" , device_map="auto" ) _UpperCAmelCase : Dict = model(torch.tensor(lowerCAmelCase__ ) ) # Expected mean on dim = -1 _UpperCAmelCase : int = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] ) torch.testing.assert_close(out.mean(-1 ) , lowerCAmelCase__ , atol=1e-2 , rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off _UpperCAmelCase : Dict = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] ) # fmt: on torch.testing.assert_close(out[0, 0, :3_0] , lowerCAmelCase__ , atol=1e-5 , rtol=1e-5 ) @unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" 
) @slow def _lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase : Optional[int] = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8] _UpperCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf" , device_map="auto" ) _UpperCAmelCase : Optional[int] = model(torch.tensor(lowerCAmelCase__ ) ) # Expected mean on dim = -1 _UpperCAmelCase : Union[str, Any] = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] ) torch.testing.assert_close(out.mean(-1 ) , lowerCAmelCase__ , atol=1e-2 , rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off _UpperCAmelCase : List[str] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) , lowerCAmelCase__ , atol=1e-2 , rtol=1e-2 ) @unittest.skip( "Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test" ) @slow def _lowerCAmelCase ( self : int ) -> List[str]: """simple docstring""" _UpperCAmelCase : List[str] = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8] _UpperCAmelCase : int = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf" , device_map="auto" ) _UpperCAmelCase : int = model(torch.tensor(lowerCAmelCase__ ) ) _UpperCAmelCase : List[Any] = torch.tensor( [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) , lowerCAmelCase__ , atol=1e-2 , rtol=1e-2 ) # fmt: off _UpperCAmelCase : List[str] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] ) # fmt: on torch.testing.assert_close(out[0, 0, :3_0] , lowerCAmelCase__ , atol=1e-5 , rtol=1e-5 ) @unittest.skip("Model is curently gated" ) @slow def _lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" _UpperCAmelCase : str = "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi" _UpperCAmelCase : str = "Simply put, the theory of relativity states that " _UpperCAmelCase : Union[str, Any] = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf" ) _UpperCAmelCase : List[str] = tokenizer.encode(lowerCAmelCase__ , return_tensors="pt" ) _UpperCAmelCase : Optional[Any] = LlamaForCausalLM.from_pretrained( "meta-llama/Llama-2-13b-chat-hf" , device_map="sequential" , use_safetensors=lowerCAmelCase__ ) # greedy generation outputs _UpperCAmelCase : Optional[int] = model.generate(lowerCAmelCase__ , max_new_tokens=6_4 , top_p=lowerCAmelCase__ , temperature=1 , do_sample=lowerCAmelCase__ ) _UpperCAmelCase : Optional[Any] = tokenizer.decode(generated_ids[0] , skip_special_tokens=lowerCAmelCase__ ) self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
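The cache-consistency trick used in the decoder test above (one forward pass over the full sequence, then a replay of the tail through past_key_values) can be shown self-contained on a tiny randomly initialized model. The sizes below are assumptions picked to keep the sketch fast; they are not the tester's defaults.

import torch
from transformers import LlamaConfig, LlamaModel

config = LlamaConfig(
    vocab_size=99, hidden_size=32, num_hidden_layers=2,
    num_attention_heads=4, intermediate_size=37,
)
model = LlamaModel(config).eval()

input_ids = torch.randint(0, config.vocab_size, (1, 7))
next_tokens = torch.randint(0, config.vocab_size, (1, 3))

with torch.no_grad():
    # one pass over the full sequence ...
    full = model(torch.cat([input_ids, next_tokens], dim=-1)).last_hidden_state
    # ... versus a cached pass over the prefix followed by the new tokens
    past = model(input_ids, use_cache=True).past_key_values
    cached = model(next_tokens, past_key_values=past).last_hidden_state

# the last three positions must match up to numerical noise
assert torch.allclose(full[:, -3:], cached, atol=1e-3)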
17
'''simple docstring''' from collections.abc import Callable from math import pi, sqrt from random import uniform from statistics import mean def __UpperCAmelCase ( a_: int ): # A local function to see if a dot lands in the circle. def is_in_circle(a_: float, a_: float ) -> bool: _UpperCAmelCase : Optional[Any] = sqrt((x**2) + (y**2) ) # Our circle has a radius of 1, so a distance # greater than 1 would land outside the circle. return distance_from_centre <= 1 # The proportion of guesses that landed in the circle _UpperCAmelCase : str = mean( int(is_in_circle(uniform(-1.0, 1.0 ), uniform(-1.0, 1.0 ) ) ) for _ in range(a_ ) ) # The ratio of the area for circle to square is pi/4. _UpperCAmelCase : Optional[int] = proportion * 4 print(f"""The estimated value of pi is {pi_estimate}""" ) print(f"""The numpy value of pi is {pi}""" ) print(f"""The total error is {abs(pi - pi_estimate )}""" ) def __UpperCAmelCase ( a_: int, a_: Callable[[float], float], a_: float = 0.0, a_: float = 1.0, ): return mean( function_to_integrate(uniform(a_, a_ ) ) for _ in range(a_ ) ) * (max_value - min_value) def __UpperCAmelCase ( a_: int, a_: float = 0.0, a_: float = 1.0 ): def identity_function(a_: float ) -> float: return x _UpperCAmelCase : Union[str, Any] = area_under_curve_estimator( a_, a_, a_, a_ ) _UpperCAmelCase : List[str] = (max_value * max_value - min_value * min_value) / 2 print("******************" ) print(f"""Estimating area under y=x where x varies from {min_value} to {max_value}""" ) print(f"""Estimated value is {estimated_value}""" ) print(f"""Expected value is {expected_value}""" ) print(f"""Total error is {abs(estimated_value - expected_value )}""" ) print("******************" ) def __UpperCAmelCase ( a_: int ): def function_to_integrate(a_: float ) -> float: return sqrt(4.0 - x * x ) _UpperCAmelCase : List[str] = area_under_curve_estimator( a_, a_, 0.0, 2.0 ) print("******************" ) print("Estimating pi using area_under_curve_estimator" ) print(f"""Estimated value is {estimated_value}""" ) print(f"""Expected value is {pi}""" ) print(f"""Total error is {abs(estimated_value - pi )}""" ) print("******************" ) if __name__ == "__main__": import doctest doctest.testmod()
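As a vectorized counterpart, a numpy sketch of the same circle estimate (numpy is an extra dependency here; the functions above use only the standard library). Like all of these estimators, its error shrinks roughly as 1/sqrt(num_samples).

import numpy as np

def estimate_pi_numpy(num_samples: int) -> float:
    # sample points uniformly in the [-1, 1] x [-1, 1] square
    xy = np.random.default_rng(0).uniform(-1.0, 1.0, size=(num_samples, 2))
    inside = (xy**2).sum(axis=1) <= 1.0  # distance from the origin at most 1
    return 4.0 * inside.mean()  # circle/square area ratio is pi/4

print(estimate_pi_numpy(100_000))  # ~3.14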
17
1
'''simple docstring''' import argparse import torch from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert from transformers.utils import logging logging.set_verbosity_info() def __UpperCAmelCase ( a_: Optional[Any], a_: Dict, a_: Any ): # Initialise PyTorch model _UpperCAmelCase : Tuple = LxmertConfig.from_json_file(a_ ) print(f"""Building PyTorch model from configuration: {config}""" ) _UpperCAmelCase : Union[str, Any] = LxmertForPreTraining(a_ ) # Load weights from tf checkpoint load_tf_weights_in_lxmert(a_, a_, a_ ) # Save pytorch-model print(f"""Save PyTorch model to {pytorch_dump_path}""" ) torch.save(model.state_dict(), a_ ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.', ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) __a = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
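A hedged usage note: a command line of the following shape should drive the converter above. The script filename and all three paths are placeholders, not files shipped with this code.

# python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./lxmert/model.ckpt \
#     --config_file ./lxmert/bert_config.json \
#     --pytorch_dump_path ./lxmert_pytorch_model.bin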
17
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) __a = { 'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'], 'processing_layoutlmv2': ['LayoutLMv2Processor'], 'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['LayoutLMv2TokenizerFast'] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['LayoutLMv2FeatureExtractor'] __a = ['LayoutLMv2ImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST', 'LayoutLMv2ForQuestionAnswering', 'LayoutLMv2ForSequenceClassification', 'LayoutLMv2ForTokenClassification', 'LayoutLMv2Layer', 'LayoutLMv2Model', 'LayoutLMv2PreTrainedModel', ] if TYPE_CHECKING: from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaLayer, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
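The _LazyModule machinery being configured above can be illustrated in miniature: attribute access triggers the real import, so importing the package stays cheap. This sketch maps attributes to stdlib modules rather than package submodules, purely so it runs standalone.

import importlib
import types

class MiniLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # invert {module: [attributes]} into {attribute: module}
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        # only reached when normal lookup fails, i.e. on first access
        module = importlib.import_module(self._attr_to_module[attr])
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value

lazy = MiniLazyModule("lazy_stdlib", {"json": ["dumps"], "math": ["sqrt"]})
print(lazy.sqrt(9.0))  # math is imported only here, on first use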
17
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __a = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['FNetTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['FNetTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'FNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'FNetForMaskedLM', 'FNetForMultipleChoice', 'FNetForNextSentencePrediction', 'FNetForPreTraining', 'FNetForQuestionAnswering', 'FNetForSequenceClassification', 'FNetForTokenClassification', 'FNetLayer', 'FNetModel', 'FNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
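The availability guards repeated above reduce to a simple pattern: probe for an optional backend once and register only the names that need it. A tiny sketch, with importlib.util.find_spec standing in for transformers' is_*_available helpers:

import importlib.util

_import_structure = {"configuration": ["Config"]}

# register model classes only when the torch backend can actually be imported
if importlib.util.find_spec("torch") is not None:
    _import_structure["modeling"] = ["Model", "PreTrainedModel"]

print(sorted(_import_structure))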
17
'''simple docstring''' def __UpperCAmelCase ( a_: int, a_: int ): if not isinstance(a_, a_ ): raise ValueError("iterations must be defined as integers" ) if not isinstance(a_, a_ ) or not number >= 1: raise ValueError( "starting number must be\n an integer and be more than 0" ) if not iterations >= 1: raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" ) _UpperCAmelCase : List[str] = "" while number <= iterations: if number % 3 == 0: out += "Fizz" if number % 5 == 0: out += "Buzz" if 0 not in (number % 3, number % 5): out += str(a_ ) # print(out) number += 1 out += " " return out if __name__ == "__main__": import doctest doctest.testmod()
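For comparison, a sketch of the same game as a single comprehension; the divisor-to-word mapping makes it easy to extend, and the asserted strings are the classic expected output, recomputed by hand.

def fizzbuzz(number: int, iterations: int) -> str:
    words = {3: "Fizz", 5: "Buzz"}
    return "".join(
        ("".join(w for d, w in words.items() if n % d == 0) or str(n)) + " "
        for n in range(number, iterations + 1)  # same inclusive range as the loop above
    )

assert fizzbuzz(1, 5) == "1 2 Fizz 4 Buzz "
assert fizzbuzz(1, 15).split()[-1] == "FizzBuzz"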
17
1
'''simple docstring''' class A__ : """simple docstring""" def __init__( self : Optional[int] , lowerCAmelCase__ : str = "" , lowerCAmelCase__ : bool = False ) -> None: """simple docstring""" _UpperCAmelCase : dict[str, RadixNode] = {} # A node will be a leaf if the tree contains its word _UpperCAmelCase : Union[str, Any] = is_leaf _UpperCAmelCase : Any = prefix def _lowerCAmelCase ( self : int , lowerCAmelCase__ : str ) -> tuple[str, str, str]: """simple docstring""" _UpperCAmelCase : Optional[int] = 0 for q, w in zip(self.prefix , lowerCAmelCase__ ): if q != w: break x += 1 return self.prefix[:x], self.prefix[x:], word[x:] def _lowerCAmelCase ( self : Dict , lowerCAmelCase__ : list[str] ) -> None: """simple docstring""" for word in words: self.insert(lowerCAmelCase__ ) def _lowerCAmelCase ( self : Dict , lowerCAmelCase__ : str ) -> None: """simple docstring""" if self.prefix == word: _UpperCAmelCase : Optional[int] = True # Case 2: The node has no edges that have a prefix to the word # Solution: We create an edge from the current node to a new one # containing the word elif word[0] not in self.nodes: _UpperCAmelCase : Dict = RadixNode(prefix=lowerCAmelCase__ , is_leaf=lowerCAmelCase__ ) else: _UpperCAmelCase : Dict = self.nodes[word[0]] _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[int] = incoming_node.match( lowerCAmelCase__ ) # Case 3: The node prefix is equal to the matching # Solution: We insert remaining word on the next node if remaining_prefix == "": self.nodes[matching_string[0]].insert(lowerCAmelCase__ ) # Case 4: The word is greater equal to the matching # Solution: Create a node in between both nodes, change # prefixes and add the new node for the remaining word else: _UpperCAmelCase : Optional[Any] = remaining_prefix _UpperCAmelCase : Optional[Any] = self.nodes[matching_string[0]] _UpperCAmelCase : Optional[Any] = RadixNode(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : int = aux_node if remaining_word == "": _UpperCAmelCase : Optional[Any] = True else: self.nodes[matching_string[0]].insert(lowerCAmelCase__ ) def _lowerCAmelCase ( self : str , lowerCAmelCase__ : str ) -> bool: """simple docstring""" _UpperCAmelCase : int = self.nodes.get(word[0] , lowerCAmelCase__ ) if not incoming_node: return False else: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = incoming_node.match( lowerCAmelCase__ ) # If there is remaining prefix, the word can't be on the tree if remaining_prefix != "": return False # This applies when the word and the prefix are equal elif remaining_word == "": return incoming_node.is_leaf # We have word remaining so we check the next node else: return incoming_node.find(lowerCAmelCase__ ) def _lowerCAmelCase ( self : List[Any] , lowerCAmelCase__ : str ) -> bool: """simple docstring""" _UpperCAmelCase : Optional[int] = self.nodes.get(word[0] , lowerCAmelCase__ ) if not incoming_node: return False else: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = incoming_node.match( lowerCAmelCase__ ) # If there is remaining prefix, the word can't be on the tree if remaining_prefix != "": return False # We have word remaining so we check the next node elif remaining_word != "": return incoming_node.delete(lowerCAmelCase__ ) else: # If it is not a leaf, we don't have to delete if not incoming_node.is_leaf: return False else: # We delete the nodes if no edges go from it if len(incoming_node.nodes ) == 0: del self.nodes[word[0]] # We merge the current node with its only child if len(self.nodes ) == 1 and not self.is_leaf: 
_UpperCAmelCase : Optional[int] = list(self.nodes.values() )[0] _UpperCAmelCase : Union[str, Any] = merging_node.is_leaf self.prefix += merging_node.prefix _UpperCAmelCase : Any = merging_node.nodes # If there is more than 1 edge, we just mark it as non-leaf elif len(incoming_node.nodes ) > 1: _UpperCAmelCase : int = False # If there is 1 edge, we merge it with its child else: _UpperCAmelCase : str = list(incoming_node.nodes.values() )[0] _UpperCAmelCase : str = merging_node.is_leaf incoming_node.prefix += merging_node.prefix _UpperCAmelCase : Optional[int] = merging_node.nodes return True def _lowerCAmelCase ( self : Any , lowerCAmelCase__ : int = 0 ) -> None: """simple docstring""" if self.prefix != "": print("-" * height , self.prefix , " (leaf)" if self.is_leaf else "" ) for value in self.nodes.values(): value.print_tree(height + 1 ) def __UpperCAmelCase ( ): _UpperCAmelCase : List[Any] = "banana bananas bandana band apple all beast".split() _UpperCAmelCase : Union[str, Any] = RadixNode() root.insert_many(a_ ) assert all(root.find(a_ ) for word in words ) assert not root.find("bandanas" ) assert not root.find("apps" ) root.delete("all" ) assert not root.find("all" ) root.delete("banana" ) assert not root.find("banana" ) assert root.find("bananas" ) return True def __UpperCAmelCase ( ): assert test_trie() def __UpperCAmelCase ( ): _UpperCAmelCase : Dict = RadixNode() _UpperCAmelCase : Union[str, Any] = "banana bananas bandanas bandana band apple all beast".split() root.insert_many(a_ ) print("Words:", a_ ) print("Tree:" ) root.print_tree() if __name__ == "__main__": main()
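The prefix-splitting step at the heart of match() can also be written with os.path.commonprefix; a standalone sketch of just that step, with an illustrative function name:

from os.path import commonprefix

def split_prefix(prefix: str, word: str) -> tuple[str, str, str]:
    # returns (common part, leftover of the node prefix, leftover of the word)
    common = commonprefix([prefix, word])
    return common, prefix[len(common):], word[len(common):]

assert split_prefix("banana", "band") == ("ban", "ana", "d")
assert split_prefix("band", "band") == ("band", "", "")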
17
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional, Union import datasets import numpy as np import torch from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.trainer_utils import get_last_checkpoint from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.31.0') __a = logging.getLogger(__name__) @dataclass class A__ : """simple docstring""" UpperCamelCase_ : str = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) UpperCamelCase_ : Optional[str] = field( default=UpperCamelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) UpperCamelCase_ : Optional[str] = field( default=UpperCamelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) UpperCamelCase_ : Optional[str] = field( default=UpperCamelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) UpperCamelCase_ : bool = field( default=UpperCamelCase , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , ) UpperCamelCase_ : str = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) UpperCamelCase_ : bool = field( default=UpperCamelCase , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) @dataclass class A__ : """simple docstring""" UpperCamelCase_ : Optional[str] = field(default=UpperCamelCase , metadata={'''help''': '''The input training data file (a text file).'''} ) UpperCamelCase_ : Optional[str] = field( default=UpperCamelCase , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , ) UpperCamelCase_ : bool = field( default=UpperCamelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) UpperCamelCase_ : Optional[int] = field( default=UpperCamelCase , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , ) UpperCamelCase_ : Optional[int] = field( default=UpperCamelCase , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. If passed, sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) UpperCamelCase_ : bool = field( default=UpperCamelCase , metadata={ '''help''': ( '''Whether to pad all samples to the maximum sentence length. ''' '''If False, will pad the samples dynamically when batching to the maximum length in the batch. 
More ''' '''efficient on GPU but very bad for TPU.''' ) } , ) UpperCamelCase_ : Optional[int] = field( default=UpperCamelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) UpperCamelCase_ : Optional[int] = field( default=UpperCamelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def _lowerCAmelCase ( self : Any ) -> Any: """simple docstring""" if self.train_file is not None: _UpperCAmelCase : List[Any] = self.train_file.split("." )[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: _UpperCAmelCase : List[str] = self.validation_file.split("." )[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." @dataclass class A__ : """simple docstring""" UpperCamelCase_ : PreTrainedTokenizerBase UpperCamelCase_ : Union[bool, str, PaddingStrategy] = True UpperCamelCase_ : Optional[int] = None UpperCamelCase_ : Optional[int] = None def __call__( self : List[Any] , lowerCAmelCase__ : List[str] ) -> List[str]: """simple docstring""" _UpperCAmelCase : int = "label" if "label" in features[0].keys() else "labels" _UpperCAmelCase : Dict = [feature.pop(lowerCAmelCase__ ) for feature in features] _UpperCAmelCase : str = len(lowerCAmelCase__ ) _UpperCAmelCase : int = len(features[0]["input_ids"] ) _UpperCAmelCase : str = [ [{k: v[i] for k, v in feature.items()} for i in range(lowerCAmelCase__ )] for feature in features ] _UpperCAmelCase : List[str] = list(chain(*lowerCAmelCase__ ) ) _UpperCAmelCase : Any = self.tokenizer.pad( lowerCAmelCase__ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , ) # Un-flatten _UpperCAmelCase : Any = {k: v.view(lowerCAmelCase__ , lowerCAmelCase__ , -1 ) for k, v in batch.items()} # Add back labels _UpperCAmelCase : List[str] = torch.tensor(lowerCAmelCase__ , dtype=torch.intaa ) return batch def __UpperCAmelCase ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _UpperCAmelCase : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_swag", a_, a_ ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout )], ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() _UpperCAmelCase : Optional[int] = training_args.get_process_log_level() logger.setLevel(a_ ) datasets.utils.logging.set_verbosity(a_ ) transformers.utils.logging.set_verbosity(a_ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(f"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. _UpperCAmelCase : Any = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _UpperCAmelCase : Any = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. """ "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.train_file is not None or data_args.validation_file is not None: _UpperCAmelCase : Union[str, Any] = {} if data_args.train_file is not None: _UpperCAmelCase : str = data_args.train_file if data_args.validation_file is not None: _UpperCAmelCase : Optional[Any] = data_args.validation_file _UpperCAmelCase : Dict = data_args.train_file.split("." )[-1] _UpperCAmelCase : Optional[int] = load_dataset( a_, data_files=a_, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) else: # Downloading and loading the swag dataset from the hub. _UpperCAmelCase : Dict = load_dataset( "swag", "regular", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_UpperCAmelCase : Any = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) _UpperCAmelCase : Any = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) _UpperCAmelCase : str = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path ), config=a_, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) # When using your own dataset or a different dataset from swag, you will probably need to change this. _UpperCAmelCase : Optional[Any] = [f"""ending{i}""" for i in range(4 )] _UpperCAmelCase : List[Any] = "sent1" _UpperCAmelCase : Optional[int] = "sent2" if data_args.max_seq_length is None: _UpperCAmelCase : List[str] = tokenizer.model_max_length if max_seq_length > 1_024: logger.warning( "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value" " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can" " override this default with `--block_size xxx`." ) _UpperCAmelCase : Dict = 1_024 else: if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the""" f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" ) _UpperCAmelCase : Dict = min(data_args.max_seq_length, tokenizer.model_max_length ) # Preprocessing the datasets. 
def preprocess_function(a_: Union[str, Any] ): _UpperCAmelCase : Optional[int] = [[context] * 4 for context in examples[context_name]] _UpperCAmelCase : Tuple = examples[question_header_name] _UpperCAmelCase : Optional[Any] = [ [f"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(a_ ) ] # Flatten out _UpperCAmelCase : List[str] = list(chain(*a_ ) ) _UpperCAmelCase : Dict = list(chain(*a_ ) ) # Tokenize _UpperCAmelCase : List[Any] = tokenizer( a_, a_, truncation=a_, max_length=a_, padding="max_length" if data_args.pad_to_max_length else False, ) # Un-flatten return {k: [v[i : i + 4] for i in range(0, len(a_ ), 4 )] for k, v in tokenized_examples.items()} if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset" ) _UpperCAmelCase : int = raw_datasets["train"] if data_args.max_train_samples is not None: _UpperCAmelCase : Optional[Any] = min(len(a_ ), data_args.max_train_samples ) _UpperCAmelCase : List[Any] = train_dataset.select(range(a_ ) ) with training_args.main_process_first(desc="train dataset map pre-processing" ): _UpperCAmelCase : Union[str, Any] = train_dataset.map( a_, batched=a_, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, ) if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset" ) _UpperCAmelCase : Dict = raw_datasets["validation"] if data_args.max_eval_samples is not None: _UpperCAmelCase : int = min(len(a_ ), data_args.max_eval_samples ) _UpperCAmelCase : List[str] = eval_dataset.select(range(a_ ) ) with training_args.main_process_first(desc="validation dataset map pre-processing" ): _UpperCAmelCase : Optional[int] = eval_dataset.map( a_, batched=a_, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, ) # Data collator _UpperCAmelCase : Tuple = ( default_data_collator if data_args.pad_to_max_length else DataCollatorForMultipleChoice(tokenizer=a_, pad_to_multiple_of=8 if training_args.fpaa else None ) ) # Metric def compute_metrics(a_: Tuple ): _UpperCAmelCase , _UpperCAmelCase : Tuple = eval_predictions _UpperCAmelCase : Union[str, Any] = np.argmax(a_, axis=1 ) return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()} # Initialize our Trainer _UpperCAmelCase : Any = Trainer( model=a_, args=a_, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=a_, data_collator=a_, compute_metrics=a_, ) # Training if training_args.do_train: _UpperCAmelCase : Optional[Any] = None if training_args.resume_from_checkpoint is not None: _UpperCAmelCase : List[Any] = training_args.resume_from_checkpoint elif last_checkpoint is not None: _UpperCAmelCase : List[str] = last_checkpoint _UpperCAmelCase : Any = trainer.train(resume_from_checkpoint=a_ ) trainer.save_model() # Saves the tokenizer too for easy upload _UpperCAmelCase : str = train_result.metrics _UpperCAmelCase : List[str] = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(a_ ) ) _UpperCAmelCase : Union[str, Any] = min(a_, len(a_ ) ) trainer.log_metrics("train", a_ ) trainer.save_metrics("train", a_ ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***" ) _UpperCAmelCase : List[Any] = trainer.evaluate() _UpperCAmelCase : int = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(a_ ) 
_UpperCAmelCase : Tuple = min(a_, len(a_ ) ) trainer.log_metrics("eval", a_ ) trainer.save_metrics("eval", a_ ) _UpperCAmelCase : int = { "finetuned_from": model_args.model_name_or_path, "tasks": "multiple-choice", "dataset_tags": "swag", "dataset_args": "regular", "dataset": "SWAG", "language": "en", } if training_args.push_to_hub: trainer.push_to_hub(**a_ ) else: trainer.create_model_card(**a_ ) def __UpperCAmelCase ( a_: int ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
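The flatten/un-flatten choreography shared by preprocess_function and DataCollatorForMultipleChoice above, shown on toy data; len(s) stands in for a real tokenizer so the sketch has no dependencies.

from itertools import chain

# two questions, each with four candidate endings
examples = [["q1 a", "q1 b", "q1 c", "q1 d"], ["q2 a", "q2 b", "q2 c", "q2 d"]]

flat = list(chain(*examples))          # 8 sequences handed to the tokenizer at once
encoded = [[len(s)] for s in flat]     # stand-in for real input_ids
unflat = [encoded[i : i + 4] for i in range(0, len(encoded), 4)]

assert len(unflat) == 2 and all(len(group) == 4 for group in unflat)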
17
1
'''simple docstring''' import unittest from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available from transformers.pipelines import pipeline from transformers.pipelines.document_question_answering import apply_tesseract from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_detectrona, require_pytesseract, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image from transformers.image_utils import load_image else: class A__ : """simple docstring""" @staticmethod def _lowerCAmelCase ( *lowerCAmelCase__ : Optional[Any] , **lowerCAmelCase__ : Tuple ) -> List[Any]: """simple docstring""" pass def __UpperCAmelCase ( a_: List[Any] ): return None # This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace, # so we can expect it to be available. __a = ( 'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png' ) @is_pipeline_test @require_torch @require_vision class A__ ( unittest.TestCase ): """simple docstring""" UpperCamelCase_ : Tuple = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING @require_pytesseract @require_vision def _lowerCAmelCase ( self : Tuple , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : str , lowerCAmelCase__ : str ) -> List[Any]: """simple docstring""" _UpperCAmelCase : Optional[Any] = pipeline( "document-question-answering" , model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ ) _UpperCAmelCase : Dict = INVOICE_URL _UpperCAmelCase : Optional[int] = list(zip(*apply_tesseract(load_image(lowerCAmelCase__ ) , lowerCAmelCase__ , "" ) ) ) _UpperCAmelCase : Dict = "What is the placebo?" _UpperCAmelCase : int = [ { "image": load_image(lowerCAmelCase__ ), "question": question, }, { "image": image, "question": question, }, { "image": image, "question": question, "word_boxes": word_boxes, }, ] return dqa_pipeline, examples def _lowerCAmelCase ( self : Tuple , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[Any] ) -> List[Any]: """simple docstring""" _UpperCAmelCase : int = dqa_pipeline(lowerCAmelCase__ , top_k=2 ) self.assertEqual( lowerCAmelCase__ , [ [ {"score": ANY(lowerCAmelCase__ ), "answer": ANY(lowerCAmelCase__ ), "start": ANY(lowerCAmelCase__ ), "end": ANY(lowerCAmelCase__ )}, {"score": ANY(lowerCAmelCase__ ), "answer": ANY(lowerCAmelCase__ ), "start": ANY(lowerCAmelCase__ ), "end": ANY(lowerCAmelCase__ )}, ] ] * 3 , ) @require_torch @require_detectrona @require_pytesseract def _lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" _UpperCAmelCase : Union[str, Any] = pipeline("document-question-answering" , model="hf-internal-testing/tiny-random-layoutlmv2" ) _UpperCAmelCase : Dict = INVOICE_URL _UpperCAmelCase : str = "How many cats are there?" 
_UpperCAmelCase : List[str] = [ {"score": 0.0001, "answer": "oy 2312/2019", "start": 3_8, "end": 3_9}, {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 3_8, "end": 4_0}, ] _UpperCAmelCase : str = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 ) self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4 ) , lowerCAmelCase__ ) _UpperCAmelCase : int = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4 ) , lowerCAmelCase__ ) # This image does not detect ANY text in it, meaning layoutlmv2 should fail. # Empty answer probably _UpperCAmelCase : str = "./tests/fixtures/tests_samples/COCO/000000039769.png" _UpperCAmelCase : str = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 ) self.assertEqual(lowerCAmelCase__ , [] ) # We can optionnally pass directly the words and bounding boxes _UpperCAmelCase : Any = "./tests/fixtures/tests_samples/COCO/000000039769.png" _UpperCAmelCase : List[Any] = [] _UpperCAmelCase : Any = [] _UpperCAmelCase : Optional[Any] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , words=lowerCAmelCase__ , boxes=lowerCAmelCase__ , top_k=2 ) self.assertEqual(lowerCAmelCase__ , [] ) @slow @require_torch @require_detectrona @require_pytesseract def _lowerCAmelCase ( self : Union[str, Any] ) -> Dict: """simple docstring""" _UpperCAmelCase : List[Any] = pipeline( "document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , ) _UpperCAmelCase : Any = INVOICE_URL _UpperCAmelCase : List[Any] = "What is the invoice number?" _UpperCAmelCase : int = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 ) self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=4 ) , [ {"score": 0.9944, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.0009, "answer": "us-001", "start": 1_6, "end": 1_6}, ] , ) _UpperCAmelCase : List[Any] = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=4 ) , [ {"score": 0.9944, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.0009, "answer": "us-001", "start": 1_6, "end": 1_6}, ] , ) _UpperCAmelCase : List[str] = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=4 ) , [ [ {"score": 0.9944, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.0009, "answer": "us-001", "start": 1_6, "end": 1_6}, ], ] * 2 , ) @slow @require_torch @require_detectrona @require_pytesseract def _lowerCAmelCase ( self : List[Any] ) -> int: """simple docstring""" _UpperCAmelCase : Union[str, Any] = pipeline( "document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , max_seq_len=5_0 , ) _UpperCAmelCase : int = INVOICE_URL _UpperCAmelCase : int = "What is the invoice number?" 
_UpperCAmelCase : List[str] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 ) self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=4 ) , [ {"score": 0.9974, "answer": "1110212019", "start": 2_3, "end": 2_3}, {"score": 0.9948, "answer": "us-001", "start": 1_6, "end": 1_6}, ] , ) _UpperCAmelCase : Any = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=4 ) , [ {"score": 0.9974, "answer": "1110212019", "start": 2_3, "end": 2_3}, {"score": 0.9948, "answer": "us-001", "start": 1_6, "end": 1_6}, ] , ) _UpperCAmelCase : Dict = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=4 ) , [ [ {"score": 0.9974, "answer": "1110212019", "start": 2_3, "end": 2_3}, {"score": 0.9948, "answer": "us-001", "start": 1_6, "end": 1_6}, ] ] * 2 , ) @slow @require_torch @require_pytesseract @require_vision def _lowerCAmelCase ( self : Union[str, Any] ) -> Any: """simple docstring""" _UpperCAmelCase : str = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=lowerCAmelCase__ ) _UpperCAmelCase : List[str] = pipeline( "document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=lowerCAmelCase__ , revision="3dc6de3" , ) _UpperCAmelCase : Dict = INVOICE_URL _UpperCAmelCase : str = "What is the invoice number?" _UpperCAmelCase : str = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 ) self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=4 ) , [ {"score": 0.4251, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.0819, "answer": "1110212019", "start": 2_3, "end": 2_3}, ] , ) _UpperCAmelCase : Tuple = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=4 ) , [ {"score": 0.4251, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.0819, "answer": "1110212019", "start": 2_3, "end": 2_3}, ] , ) _UpperCAmelCase : Tuple = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=4 ) , [ [ {"score": 0.4251, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.0819, "answer": "1110212019", "start": 2_3, "end": 2_3}, ] ] * 2 , ) _UpperCAmelCase : Optional[int] = list(zip(*apply_tesseract(load_image(lowerCAmelCase__ ) , lowerCAmelCase__ , "" ) ) ) # This model should also work if `image` is set to None _UpperCAmelCase : int = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=4 ) , [ {"score": 0.4251, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.0819, "answer": "1110212019", "start": 2_3, "end": 2_3}, ] , ) @slow @require_torch @require_pytesseract @require_vision def _lowerCAmelCase ( self : Tuple ) -> List[str]: """simple docstring""" _UpperCAmelCase : Any = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=lowerCAmelCase__ ) _UpperCAmelCase : int = pipeline( "document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=lowerCAmelCase__ , revision="3dc6de3" , max_seq_len=5_0 , ) _UpperCAmelCase : Union[str, Any] = INVOICE_URL _UpperCAmelCase : int = "What is the invoice number?" 
_UpperCAmelCase : List[Any] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 ) self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=4 ) , [ {"score": 0.9999, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.9998, "answer": "us-001", "start": 1_6, "end": 1_6}, ] , ) _UpperCAmelCase : Any = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=4 ) , [ [ {"score": 0.9999, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.9998, "answer": "us-001", "start": 1_6, "end": 1_6}, ] ] * 2 , ) _UpperCAmelCase : List[Any] = list(zip(*apply_tesseract(load_image(lowerCAmelCase__ ) , lowerCAmelCase__ , "" ) ) ) # This model should also work if `image` is set to None _UpperCAmelCase : Optional[Any] = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=4 ) , [ {"score": 0.9999, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.9998, "answer": "us-001", "start": 1_6, "end": 1_6}, ] , ) @slow @require_torch def _lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase : Tuple = pipeline( "document-question-answering" , model="naver-clova-ix/donut-base-finetuned-docvqa" , tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ) , feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa" , ) _UpperCAmelCase : Tuple = INVOICE_URL _UpperCAmelCase : Union[str, Any] = "What is the invoice number?" _UpperCAmelCase : Dict = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 ) self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4 ) , [{"answer": "us-001"}] ) @require_tf @unittest.skip("Document question answering not implemented in TF" ) def _lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" pass
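For reference, a minimal sketch of driving the same pipeline these tests exercise, outside of unittest; the checkpoint and invoice URL are the ones used in the slow tests above.

# Minimal usage sketch of the document-question-answering pipeline tested above.
from transformers import pipeline

dqa = pipeline(
    "document-question-answering",
    model="impira/layoutlm-document-qa",
)
url = "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
preds = dqa(image=url, question="What is the invoice number?", top_k=2)
for pred in preds:
    # Each prediction carries a score, the answer text, and start/end word indices.
    print(pred["score"], pred["answer"], pred["start"], pred["end"])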
17
'''simple docstring''' import argparse import pytorch_lightning as pl import torch from torch import nn from transformers import LongformerForQuestionAnswering, LongformerModel class A__ ( pl.LightningModule ): """simple docstring""" def __init__( self : Any , lowerCAmelCase__ : Optional[Any] ) -> str: """simple docstring""" super().__init__() _UpperCAmelCase : List[str] = model _UpperCAmelCase : Dict = 2 _UpperCAmelCase : Tuple = nn.Linear(self.model.config.hidden_size , self.num_labels ) def _lowerCAmelCase ( self : Tuple ) -> int: """simple docstring""" pass def __UpperCAmelCase ( a_: str, a_: str, a_: str ): # load longformer model from model identifier _UpperCAmelCase : int = LongformerModel.from_pretrained(a_ ) _UpperCAmelCase : Any = LightningModel(a_ ) _UpperCAmelCase : int = torch.load(a_, map_location=torch.device("cpu" ) ) lightning_model.load_state_dict(ckpt["state_dict"] ) # init longformer question answering model _UpperCAmelCase : List[str] = LongformerForQuestionAnswering.from_pretrained(a_ ) # transfer weights longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() ) longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() ) longformer_for_qa.eval() # save model longformer_for_qa.save_pretrained(a_ ) print(f"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( '--longformer_model', default=None, type=str, required=True, help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.', ) parser.add_argument( '--longformer_question_answering_ckpt_path', default=None, type=str, required=True, help='Path to the official PyTorch Lightning checkpoint.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) __a = parser.parse_args() convert_longformer_qa_checkpoint_to_pytorch( args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path )
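Once converted, the dump can be consumed like any pretrained checkpoint. A sketch under the assumption that the script above wrote its output to ./longformer-qa; the tokenizer checkpoint is the public base model.

# Reload the converted question-answering model; ./longformer-qa is a placeholder.
from transformers import LongformerForQuestionAnswering, LongformerTokenizerFast

tokenizer = LongformerTokenizerFast.from_pretrained("allenai/longformer-base-4096")
model = LongformerForQuestionAnswering.from_pretrained("./longformer-qa")

inputs = tokenizer(
    "Who wrote the report?",
    "The report was written by the committee.",
    return_tensors="pt",
)
outputs = model(**inputs)
print(outputs.start_logits.shape, outputs.end_logits.shape)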
17
1
'''simple docstring''' import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def __UpperCAmelCase ( a_: str, a_: List[str]=10 ): _UpperCAmelCase : Optional[int] = [] for _ in range(a_ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def __UpperCAmelCase ( a_: int, a_: int=10 ): _UpperCAmelCase : Any = [] for step in range(a_ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: _UpperCAmelCase : List[Any] = os.path.join(a_, "schedule.bin" ) torch.save(scheduler.state_dict(), a_ ) _UpperCAmelCase : Union[str, Any] = torch.load(a_ ) scheduler.load_state_dict(a_ ) return lrs @require_torch class A__ ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : Tuple , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : int ) -> Optional[int]: """simple docstring""" self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) ) for a, b in zip(lowerCAmelCase__ , lowerCAmelCase__ ): self.assertAlmostEqual(lowerCAmelCase__ , lowerCAmelCase__ , delta=lowerCAmelCase__ ) def _lowerCAmelCase ( self : str ) -> Optional[int]: """simple docstring""" _UpperCAmelCase : Optional[int] = torch.tensor([0.1, -0.2, -0.1] , requires_grad=lowerCAmelCase__ ) _UpperCAmelCase : Optional[int] = torch.tensor([0.4, 0.2, -0.5] ) _UpperCAmelCase : List[Any] = nn.MSELoss() # No warmup, constant schedule, no gradient clipping _UpperCAmelCase : str = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 ) for _ in range(1_0_0 ): _UpperCAmelCase : Tuple = criterion(lowerCAmelCase__ , lowerCAmelCase__ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) def _lowerCAmelCase ( self : Any ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase : Optional[int] = torch.tensor([0.1, -0.2, -0.1] , requires_grad=lowerCAmelCase__ ) _UpperCAmelCase : Tuple = torch.tensor([0.4, 0.2, -0.5] ) _UpperCAmelCase : Dict = nn.MSELoss() # No warmup, constant schedule, no gradient clipping _UpperCAmelCase : Any = Adafactor( params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=lowerCAmelCase__ , weight_decay=0.0 , relative_step=lowerCAmelCase__ , scale_parameter=lowerCAmelCase__ , warmup_init=lowerCAmelCase__ , ) for _ in range(1_0_0_0 ): _UpperCAmelCase : Optional[int] = criterion(lowerCAmelCase__ , lowerCAmelCase__ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) @require_torch class A__ ( unittest.TestCase ): """simple docstring""" UpperCamelCase_ : Union[str, Any] = nn.Linear(50 , 50 ) if is_torch_available() else None UpperCamelCase_ : Dict = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None UpperCamelCase_ : Optional[int] = 10 def _lowerCAmelCase ( self : Optional[int] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Any , lowerCAmelCase__ : List[Any]=None ) -> str: """simple docstring""" self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) ) for a, b in zip(lowerCAmelCase__ , lowerCAmelCase__ ): self.assertAlmostEqual(lowerCAmelCase__ , lowerCAmelCase__ , delta=lowerCAmelCase__ , msg=lowerCAmelCase__ ) def _lowerCAmelCase ( self : Any ) -> str: """simple docstring""" _UpperCAmelCase : List[str] = {"num_warmup_steps": 2, "num_training_steps": 1_0} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) _UpperCAmelCase : Dict = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {"num_warmup_steps": 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, "num_cycles": 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, "power": 2.0, "lr_end": 1e-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {"num_warmup_steps": 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): _UpperCAmelCase , _UpperCAmelCase : Dict = data _UpperCAmelCase : Optional[Any] = scheduler_func(self.optimizer , **lowerCAmelCase__ ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) _UpperCAmelCase : Optional[Any] = unwrap_schedule(lowerCAmelCase__ , self.num_steps ) self.assertListAlmostEqual( lowerCAmelCase__ , lowerCAmelCase__ , tol=1e-2 , msg=F"""failed for {scheduler_func} in normal scheduler""" , ) _UpperCAmelCase : Any = scheduler_func(self.optimizer , **lowerCAmelCase__ ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(lowerCAmelCase__ ) # wrap to test picklability of the schedule _UpperCAmelCase : Optional[Any] = unwrap_and_save_reload_schedule(lowerCAmelCase__ , self.num_steps ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ , msg=F"""failed for {scheduler_func} in save and reload""" ) class A__ : """simple docstring""" def __init__( self : Dict , lowerCAmelCase__ : Union[str, Any] ) -> List[str]: """simple docstring""" _UpperCAmelCase : Union[str, Any] = fn def __call__( self : Tuple , *lowerCAmelCase__ : int , **lowerCAmelCase__ : Tuple ) -> str: """simple docstring""" return self.fn(*lowerCAmelCase__ , **lowerCAmelCase__ ) @classmethod def _lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase__ : Dict ) -> Optional[int]: """simple docstring""" _UpperCAmelCase : Union[str, Any] = list(map(self , scheduler.lr_lambdas ) )
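A hedged sketch of the warmup-schedule pattern these tests exercise: an AdamW optimizer stepped together with a linear warmup/decay schedule; the model and loss are dummies.

# Step an AdamW optimizer and a linear warmup schedule together, as the tests do.
import torch
from transformers import AdamW, get_linear_schedule_with_warmup

model = torch.nn.Linear(50, 50)
optimizer = AdamW(model.parameters(), lr=1e-4)
scheduler = get_linear_schedule_with_warmup(
    optimizer, num_warmup_steps=2, num_training_steps=10
)

for step in range(10):
    loss = model(torch.randn(8, 50)).pow(2).mean()  # dummy loss
    loss.backward()
    optimizer.step()
    scheduler.step()  # advance the learning-rate schedule after the optimizer step
    optimizer.zero_grad()
    print(step, scheduler.get_last_lr()[0])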
17
'''simple docstring''' from importlib import import_module from .logging import get_logger __a = get_logger(__name__) class A__ : """simple docstring""" def __init__( self : List[str] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[Any]=None ) -> Optional[int]: """simple docstring""" _UpperCAmelCase : Any = attrs or [] if module is not None: for key in module.__dict__: if key in attrs or not key.startswith("__" ): setattr(self , lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) _UpperCAmelCase : int = module._original_module if isinstance(lowerCAmelCase__ , _PatchedModuleObj ) else module class A__ : """simple docstring""" UpperCamelCase_ : Union[str, Any] = [] def __init__( self : int , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : str , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[int]=None ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase : List[Any] = obj _UpperCAmelCase : int = target _UpperCAmelCase : Optional[int] = new _UpperCAmelCase : Any = target.split("." )[0] _UpperCAmelCase : Optional[int] = {} _UpperCAmelCase : Dict = attrs or [] def __enter__( self : List[str] ) -> int: """simple docstring""" *_UpperCAmelCase , _UpperCAmelCase : List[str] = self.target.split("." ) # Patch modules: # it's used to patch attributes of submodules like "os.path.join"; # in this case we need to patch "os" and "os.path" for i in range(len(lowerCAmelCase__ ) ): try: _UpperCAmelCase : int = import_module(".".join(submodules[: i + 1] ) ) except ModuleNotFoundError: continue # We iterate over all the globals in self.obj in case we find "os" or "os.path" for attr in self.obj.__dir__(): _UpperCAmelCase : List[Any] = getattr(self.obj , lowerCAmelCase__ ) # We don't check for the name of the global, but rather if its value *is* "os" or "os.path". # This allows to patch renamed modules like "from os import path as ospath". if obj_attr is submodule or ( (isinstance(lowerCAmelCase__ , _PatchedModuleObj ) and obj_attr._original_module is submodule) ): _UpperCAmelCase : Tuple = obj_attr # patch at top level setattr(self.obj , lowerCAmelCase__ , _PatchedModuleObj(lowerCAmelCase__ , attrs=self.attrs ) ) _UpperCAmelCase : List[Any] = getattr(self.obj , lowerCAmelCase__ ) # construct lower levels patches for key in submodules[i + 1 :]: setattr(lowerCAmelCase__ , lowerCAmelCase__ , _PatchedModuleObj(getattr(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , attrs=self.attrs ) ) _UpperCAmelCase : Any = getattr(lowerCAmelCase__ , lowerCAmelCase__ ) # finally set the target attribute setattr(lowerCAmelCase__ , lowerCAmelCase__ , self.new ) # Patch attribute itself: # it's used for builtins like "open", # and also to patch "os.path.join" we may also need to patch "join" # itself if it was imported as "from os.path import join". if submodules: # if it's an attribute of a submodule like "os.path.join" try: _UpperCAmelCase : Dict = getattr(import_module(".".join(lowerCAmelCase__ ) ) , lowerCAmelCase__ ) except (AttributeError, ModuleNotFoundError): return # We iterate over all the globals in self.obj in case we find "os.path.join" for attr in self.obj.__dir__(): # We don't check for the name of the global, but rather if its value *is* "os.path.join". # This allows to patch renamed attributes like "from os.path import join as pjoin". 
if getattr(self.obj , lowerCAmelCase__ ) is attr_value: _UpperCAmelCase : Optional[Any] = getattr(self.obj , lowerCAmelCase__ ) setattr(self.obj , lowerCAmelCase__ , self.new ) elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open" _UpperCAmelCase : Dict = globals()["__builtins__"][target_attr] setattr(self.obj , lowerCAmelCase__ , self.new ) else: raise RuntimeError(F"""Tried to patch attribute {target_attr} instead of a submodule.""" ) def __exit__( self : Optional[int] , *lowerCAmelCase__ : List[str] ) -> Union[str, Any]: """simple docstring""" for attr in list(self.original ): setattr(self.obj , lowerCAmelCase__ , self.original.pop(lowerCAmelCase__ ) ) def _lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" self.__enter__() self._active_patches.append(self ) def _lowerCAmelCase ( self : Optional[int] ) -> Tuple: """simple docstring""" try: self._active_patches.remove(self ) except ValueError: # If the patch hasn't been started this will fail return None return self.__exit__()
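A hedged usage sketch for the patcher class above (named A__ in this dump; the datasets library exposes this pattern as `patch_submodule`). `my_module` is a hypothetical module that itself does `import os`.

# Temporarily swap os.path.join as seen from a target module.
import my_module  # hypothetical module under test

def fake_join(*parts):
    # stand-in replacement for os.path.join
    return "/".join(parts)

patcher = A__(my_module, "os.path.join", fake_join)
with patcher:
    ...  # inside the block, my_module sees fake_join wherever it uses os.path.join
# on __exit__, the original attributes are restored from self.original

# The two unnamed methods above also offer a start()/stop() pair that gives the
# same behavior without a with-block, tracked via _active_patches.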
17
1
'''simple docstring''' from ..utils import DummyObject, requires_backends class A__ ( metaclass=UpperCamelCase ): """simple docstring""" UpperCamelCase_ : Tuple = ['''torch''', '''scipy'''] def __init__( self : List[str] , *lowerCAmelCase__ : Optional[Any] , **lowerCAmelCase__ : Optional[Any] ) -> str: """simple docstring""" requires_backends(self , ["torch", "scipy"] ) @classmethod def _lowerCAmelCase ( cls : List[str] , *lowerCAmelCase__ : Dict , **lowerCAmelCase__ : List[Any] ) -> Optional[Any]: """simple docstring""" requires_backends(cls , ["torch", "scipy"] ) @classmethod def _lowerCAmelCase ( cls : List[str] , *lowerCAmelCase__ : Optional[int] , **lowerCAmelCase__ : Union[str, Any] ) -> str: """simple docstring""" requires_backends(cls , ["torch", "scipy"] )
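A brief sketch of what this dummy module does: any instantiation or classmethod access goes through requires_backends, which raises an ImportError naming the missing packages. The `_backends` attribute is the convention used by the non-obfuscated dummy objects; the sketch assumes torch and scipy are in fact absent.

# Assumes torch and scipy are NOT installed; otherwise nothing is raised.
from transformers.utils import DummyObject, requires_backends

class DummyScipyThing(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

try:
    DummyScipyThing()
except ImportError as err:
    print(err)  # the message points the user at the install command for the backends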
17
'''simple docstring''' import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal __a = datasets.utils.logging.get_logger(__name__) __a = ['names', 'prefix'] __a = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols'] __a = ['encoding_errors', 'on_bad_lines'] __a = ['date_format'] @dataclass class A__ ( datasets.BuilderConfig ): """simple docstring""" UpperCamelCase_ : str = "," UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : Optional[Union[int, List[int], str]] = "infer" UpperCamelCase_ : Optional[List[str]] = None UpperCamelCase_ : Optional[List[str]] = None UpperCamelCase_ : Optional[Union[int, str, List[int], List[str]]] = None UpperCamelCase_ : Optional[Union[List[int], List[str]]] = None UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : bool = True UpperCamelCase_ : Optional[Literal["c", "python", "pyarrow"]] = None UpperCamelCase_ : Dict[Union[int, str], Callable[[Any], Any]] = None UpperCamelCase_ : Optional[list] = None UpperCamelCase_ : Optional[list] = None UpperCamelCase_ : bool = False UpperCamelCase_ : Optional[Union[int, List[int]]] = None UpperCamelCase_ : Optional[int] = None UpperCamelCase_ : Optional[Union[str, List[str]]] = None UpperCamelCase_ : bool = True UpperCamelCase_ : bool = True UpperCamelCase_ : bool = False UpperCamelCase_ : bool = True UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : str = "." UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : str = '"' UpperCamelCase_ : int = 0 UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : bool = True UpperCamelCase_ : bool = True UpperCamelCase_ : int = 0 UpperCamelCase_ : bool = True UpperCamelCase_ : bool = False UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : int = 1_00_00 UpperCamelCase_ : Optional[datasets.Features] = None UpperCamelCase_ : Optional[str] = "strict" UpperCamelCase_ : Literal["error", "warn", "skip"] = "error" UpperCamelCase_ : Optional[str] = None def _lowerCAmelCase ( self : str ) -> Tuple: """simple docstring""" if self.delimiter is not None: _UpperCAmelCase : Any = self.delimiter if self.column_names is not None: _UpperCAmelCase : List[Any] = self.column_names @property def _lowerCAmelCase ( self : Optional[int] ) -> Optional[int]: """simple docstring""" _UpperCAmelCase : Dict = { "sep": self.sep, "header": self.header, "names": self.names, "index_col": self.index_col, "usecols": self.usecols, "prefix": self.prefix, "mangle_dupe_cols": self.mangle_dupe_cols, "engine": self.engine, "converters": self.converters, "true_values": self.true_values, "false_values": self.false_values, "skipinitialspace": self.skipinitialspace, "skiprows": self.skiprows, "nrows": self.nrows, "na_values": self.na_values, "keep_default_na": self.keep_default_na, "na_filter": self.na_filter, "verbose": self.verbose, "skip_blank_lines": self.skip_blank_lines, "thousands": self.thousands, "decimal": self.decimal, "lineterminator": self.lineterminator, "quotechar": self.quotechar, "quoting": self.quoting, "escapechar": self.escapechar, "comment": self.comment, "encoding": self.encoding, "dialect": self.dialect, "error_bad_lines": self.error_bad_lines, "warn_bad_lines": self.warn_bad_lines, 
"skipfooter": self.skipfooter, "doublequote": self.doublequote, "memory_map": self.memory_map, "float_precision": self.float_precision, "chunksize": self.chunksize, "encoding_errors": self.encoding_errors, "on_bad_lines": self.on_bad_lines, "date_format": self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , lowerCAmelCase__ ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class A__ ( datasets.ArrowBasedBuilder ): """simple docstring""" UpperCamelCase_ : int = CsvConfig def _lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" return datasets.DatasetInfo(features=self.config.features ) def _lowerCAmelCase ( self : Tuple , lowerCAmelCase__ : str ) -> List[str]: """simple docstring""" if not self.config.data_files: raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) _UpperCAmelCase : List[str] = dl_manager.download_and_extract(self.config.data_files ) if isinstance(lowerCAmelCase__ , (str, list, tuple) ): _UpperCAmelCase : int = data_files if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCAmelCase : Any = [files] _UpperCAmelCase : List[Any] = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )] _UpperCAmelCase : Optional[Any] = [] for split_name, files in data_files.items(): if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCAmelCase : str = [files] _UpperCAmelCase : Any = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files] splits.append(datasets.SplitGenerator(name=lowerCAmelCase__ , gen_kwargs={"files": files} ) ) return splits def _lowerCAmelCase ( self : List[Any] , lowerCAmelCase__ : pa.Table ) -> pa.Table: """simple docstring""" if self.config.features is not None: _UpperCAmelCase : Tuple = self.config.features.arrow_schema if all(not require_storage_cast(lowerCAmelCase__ ) for feature in self.config.features.values() ): # cheaper cast _UpperCAmelCase : Any = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=lowerCAmelCase__ ) else: # more expensive cast; allows str <-> int/float or str to Audio for example _UpperCAmelCase : int = table_cast(lowerCAmelCase__ , lowerCAmelCase__ ) return pa_table def _lowerCAmelCase ( self : Dict , lowerCAmelCase__ : Dict ) -> Dict: """simple docstring""" _UpperCAmelCase : int = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str _UpperCAmelCase : Optional[Any] = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(lowerCAmelCase__ ) else object for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() ) } if schema is not None else None ) for file_idx, file in 
enumerate(itertools.chain.from_iterable(lowerCAmelCase__ ) ): _UpperCAmelCase : Optional[Any] = pd.read_csv(lowerCAmelCase__ , iterator=lowerCAmelCase__ , dtype=lowerCAmelCase__ , **self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(lowerCAmelCase__ ): _UpperCAmelCase : Optional[int] = pa.Table.from_pandas(lowerCAmelCase__ ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(lowerCAmelCase__ ) except ValueError as e: logger.error(F"""Failed to read file '{file}' with error {type(lowerCAmelCase__ )}: {e}""" ) raise
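A sketch of exercising this CSV builder through the public API; the file path is a placeholder, and the keyword arguments are forwarded into CsvConfig and from there into pandas.read_csv, as the config above shows.

# Load a CSV file via the builder above; data/train.csv is a placeholder path.
import datasets

ds = datasets.load_dataset(
    "csv",
    data_files={"train": "data/train.csv"},
    delimiter=",",  # rerouted to `sep` in CsvConfig.__post_init__
    skiprows=0,
)["train"]
print(ds.features)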
17
1
'''simple docstring'''


def and_gate(input_a: int, input_b: int) -> int:
    # An AND gate is 1 only when neither input is 0.
    return int((input_a, input_b).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1


if __name__ == "__main__":
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
17
'''simple docstring'''
from __future__ import annotations


def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum over subsets of non-adjacent elements of ``nums``.

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
    18
    """
    if not nums:
        return 0
    max_including = nums[0]  # best sum that includes the current element
    max_excluding = 0  # best sum that excludes the current element
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
17
1
'''simple docstring''' import unittest from parameterized import parameterized from transformers import OpenLlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel class A__ : """simple docstring""" def __init__( self : Dict , lowerCAmelCase__ : str , lowerCAmelCase__ : Dict=1_3 , lowerCAmelCase__ : Dict=7 , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : int=True , lowerCAmelCase__ : List[Any]=False , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : Dict=9_9 , lowerCAmelCase__ : Dict=3_2 , lowerCAmelCase__ : List[str]=5 , lowerCAmelCase__ : Union[str, Any]=4 , lowerCAmelCase__ : int=3_7 , lowerCAmelCase__ : Union[str, Any]="gelu" , lowerCAmelCase__ : int=0.1 , lowerCAmelCase__ : List[Any]=0.1 , lowerCAmelCase__ : List[Any]=5_1_2 , lowerCAmelCase__ : Optional[int]=1_6 , lowerCAmelCase__ : Optional[int]=2 , lowerCAmelCase__ : Dict=0.02 , lowerCAmelCase__ : Dict=3 , lowerCAmelCase__ : Tuple=4 , lowerCAmelCase__ : Tuple=None , ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase : Optional[Any] = parent _UpperCAmelCase : List[str] = batch_size _UpperCAmelCase : Optional[int] = seq_length _UpperCAmelCase : List[Any] = is_training _UpperCAmelCase : List[Any] = use_input_mask _UpperCAmelCase : str = use_token_type_ids _UpperCAmelCase : int = use_labels _UpperCAmelCase : Tuple = vocab_size _UpperCAmelCase : Tuple = hidden_size _UpperCAmelCase : str = num_hidden_layers _UpperCAmelCase : Tuple = num_attention_heads _UpperCAmelCase : Optional[Any] = intermediate_size _UpperCAmelCase : Dict = hidden_act _UpperCAmelCase : List[str] = hidden_dropout_prob _UpperCAmelCase : Optional[int] = attention_probs_dropout_prob _UpperCAmelCase : int = max_position_embeddings _UpperCAmelCase : List[Any] = type_vocab_size _UpperCAmelCase : Dict = type_sequence_label_size _UpperCAmelCase : List[Any] = initializer_range _UpperCAmelCase : Dict = num_labels _UpperCAmelCase : str = num_choices _UpperCAmelCase : List[str] = scope def _lowerCAmelCase ( self : int ) -> Tuple: """simple docstring""" _UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase : Tuple = None if self.use_input_mask: _UpperCAmelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase : Tuple = None if self.use_token_type_ids: _UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCAmelCase : str = None _UpperCAmelCase : List[Any] = None _UpperCAmelCase : Optional[Any] = None if self.use_labels: _UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase : int = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase : Dict = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _lowerCAmelCase ( self : int ) -> Optional[int]: """simple docstring""" return OpenLlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , 
num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , use_stable_embedding=lowerCAmelCase__ , ) def _lowerCAmelCase ( self : Optional[int] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Any , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int] ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase : List[str] = OpenLlamaModel(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() _UpperCAmelCase : str = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ ) _UpperCAmelCase : int = model(lowerCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowerCAmelCase ( self : List[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any] , ) -> List[Any]: """simple docstring""" _UpperCAmelCase : Any = True _UpperCAmelCase : Optional[int] = OpenLlamaModel(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() _UpperCAmelCase : Any = model( lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , ) _UpperCAmelCase : Tuple = model( lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , ) _UpperCAmelCase : int = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowerCAmelCase ( self : Dict , lowerCAmelCase__ : str , lowerCAmelCase__ : str , lowerCAmelCase__ : str , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : int , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Tuple , ) -> Dict: """simple docstring""" _UpperCAmelCase : Optional[int] = OpenLlamaForCausalLM(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() _UpperCAmelCase : Optional[Any] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : int , lowerCAmelCase__ : Any , ) -> int: """simple docstring""" _UpperCAmelCase : Optional[int] = True _UpperCAmelCase : int = True _UpperCAmelCase : Optional[int] = OpenLlamaForCausalLM(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() # first forward pass _UpperCAmelCase : Any = model( lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , 
use_cache=lowerCAmelCase__ , ) _UpperCAmelCase : Tuple = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids _UpperCAmelCase : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) _UpperCAmelCase : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and _UpperCAmelCase : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) _UpperCAmelCase : Dict = torch.cat([input_mask, next_mask] , dim=-1 ) _UpperCAmelCase : List[Any] = model( lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , )["hidden_states"][0] _UpperCAmelCase : Tuple = model( lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , )["hidden_states"][0] # select random slice _UpperCAmelCase : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item() _UpperCAmelCase : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx].detach() _UpperCAmelCase : Any = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-3 ) ) def _lowerCAmelCase ( self : str ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase : Dict = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) : Tuple = config_and_inputs _UpperCAmelCase : int = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase ): """simple docstring""" UpperCamelCase_ : Optional[int] = ( (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else () ) UpperCamelCase_ : Dict = (OpenLlamaForCausalLM,) if is_torch_available() else () UpperCamelCase_ : str = ( { '''feature-extraction''': OpenLlamaModel, '''text-classification''': OpenLlamaForSequenceClassification, '''text-generation''': OpenLlamaForCausalLM, '''zero-shot''': OpenLlamaForSequenceClassification, } if is_torch_available() else {} ) UpperCamelCase_ : List[Any] = False UpperCamelCase_ : Optional[Any] = False def _lowerCAmelCase ( self : List[Any] ) -> int: """simple docstring""" _UpperCAmelCase : int = OpenLlamaModelTester(self ) _UpperCAmelCase : str = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=3_7 ) def _lowerCAmelCase ( self : Tuple ) -> int: """simple docstring""" self.config_tester.run_common_tests() def _lowerCAmelCase ( self : str ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase__ ) def _lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _UpperCAmelCase : Any = type self.model_tester.create_and_check_model(*lowerCAmelCase__ ) def _lowerCAmelCase ( self : Tuple ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase , 
_UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase : Dict = 3 _UpperCAmelCase : Union[str, Any] = input_dict["input_ids"] _UpperCAmelCase : Union[str, Any] = input_ids.ne(1 ).to(lowerCAmelCase__ ) _UpperCAmelCase : Union[str, Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) _UpperCAmelCase : Union[str, Any] = OpenLlamaForSequenceClassification(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() _UpperCAmelCase : Dict = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def _lowerCAmelCase ( self : Dict ) -> int: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase : Tuple = 3 _UpperCAmelCase : Dict = "single_label_classification" _UpperCAmelCase : str = input_dict["input_ids"] _UpperCAmelCase : Union[str, Any] = input_ids.ne(1 ).to(lowerCAmelCase__ ) _UpperCAmelCase : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) _UpperCAmelCase : str = OpenLlamaForSequenceClassification(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() _UpperCAmelCase : Optional[Any] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def _lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase : Dict = 3 _UpperCAmelCase : Union[str, Any] = "multi_label_classification" _UpperCAmelCase : str = input_dict["input_ids"] _UpperCAmelCase : str = input_ids.ne(1 ).to(lowerCAmelCase__ ) _UpperCAmelCase : Union[str, Any] = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) _UpperCAmelCase : int = OpenLlamaForSequenceClassification(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() _UpperCAmelCase : Any = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test" ) def _lowerCAmelCase ( self : str ) -> Optional[int]: """simple docstring""" pass @parameterized.expand([("linear",), ("dynamic",)] ) def _lowerCAmelCase ( self : Dict , lowerCAmelCase__ : List[str] ) -> Optional[int]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase : str = ids_tensor([1, 1_0] , config.vocab_size ) _UpperCAmelCase : Optional[int] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights _UpperCAmelCase : Optional[Any] = OpenLlamaModel(lowerCAmelCase__ ) original_model.to(lowerCAmelCase__ ) original_model.eval() _UpperCAmelCase : Optional[Any] = original_model(lowerCAmelCase__ ).last_hidden_state _UpperCAmelCase : Any = original_model(lowerCAmelCase__ ).last_hidden_state set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights _UpperCAmelCase : Dict = {"type": 
scaling_type, "factor": 10.0} _UpperCAmelCase : Union[str, Any] = OpenLlamaModel(lowerCAmelCase__ ) scaled_model.to(lowerCAmelCase__ ) scaled_model.eval() _UpperCAmelCase : str = scaled_model(lowerCAmelCase__ ).last_hidden_state _UpperCAmelCase : List[str] = scaled_model(lowerCAmelCase__ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-5 ) ) else: self.assertFalse(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-5 ) )
17
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() __a = logging.get_logger(__name__) def __UpperCAmelCase ( a_: List[str] ): _UpperCAmelCase : Union[str, Any] = OrderedDict() for key, value in state_dict.items(): if key.startswith("module.encoder" ): _UpperCAmelCase : Optional[int] = key.replace("module.encoder", "glpn.encoder" ) if key.startswith("module.decoder" ): _UpperCAmelCase : List[Any] = key.replace("module.decoder", "decoder.stages" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 _UpperCAmelCase : int = key[key.find("patch_embed" ) + len("patch_embed" )] _UpperCAmelCase : Union[str, Any] = key.replace(f"""patch_embed{idx}""", f"""patch_embeddings.{int(a_ )-1}""" ) if "norm" in key: _UpperCAmelCase : Union[str, Any] = key.replace("norm", "layer_norm" ) if "glpn.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 _UpperCAmelCase : str = key[key.find("glpn.encoder.layer_norm" ) + len("glpn.encoder.layer_norm" )] _UpperCAmelCase : Optional[Any] = key.replace(f"""layer_norm{idx}""", f"""layer_norm.{int(a_ )-1}""" ) if "layer_norm1" in key: _UpperCAmelCase : Union[str, Any] = key.replace("layer_norm1", "layer_norm_1" ) if "layer_norm2" in key: _UpperCAmelCase : List[Any] = key.replace("layer_norm2", "layer_norm_2" ) if "block" in key: # replace for example block1 by block.0 _UpperCAmelCase : Optional[Any] = key[key.find("block" ) + len("block" )] _UpperCAmelCase : List[str] = key.replace(f"""block{idx}""", f"""block.{int(a_ )-1}""" ) if "attn.q" in key: _UpperCAmelCase : Optional[int] = key.replace("attn.q", "attention.self.query" ) if "attn.proj" in key: _UpperCAmelCase : List[str] = key.replace("attn.proj", "attention.output.dense" ) if "attn" in key: _UpperCAmelCase : Dict = key.replace("attn", "attention.self" ) if "fc1" in key: _UpperCAmelCase : List[Any] = key.replace("fc1", "dense1" ) if "fc2" in key: _UpperCAmelCase : List[Any] = key.replace("fc2", "dense2" ) if "linear_pred" in key: _UpperCAmelCase : Any = key.replace("linear_pred", "classifier" ) if "linear_fuse" in key: _UpperCAmelCase : Dict = key.replace("linear_fuse.conv", "linear_fuse" ) _UpperCAmelCase : List[str] = key.replace("linear_fuse.bn", "batch_norm" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 _UpperCAmelCase : List[Any] = key[key.find("linear_c" ) + len("linear_c" )] _UpperCAmelCase : Tuple = key.replace(f"""linear_c{idx}""", f"""linear_c.{int(a_ )-1}""" ) if "bot_conv" in key: _UpperCAmelCase : Union[str, Any] = key.replace("bot_conv", "0.convolution" ) if "skip_conv1" in key: _UpperCAmelCase : Optional[int] = key.replace("skip_conv1", "1.convolution" ) if "skip_conv2" in key: _UpperCAmelCase : Optional[int] = key.replace("skip_conv2", "2.convolution" ) if "fusion1" in key: _UpperCAmelCase : List[str] = key.replace("fusion1", "1.fusion" ) if "fusion2" in key: _UpperCAmelCase : List[str] = key.replace("fusion2", "2.fusion" ) if "fusion3" in key: _UpperCAmelCase : Optional[Any] = key.replace("fusion3", "3.fusion" ) if "fusion" in key and "conv" in key: _UpperCAmelCase : List[Any] = key.replace("conv", "convolutional_layer" ) if key.startswith("module.last_layer_depth" ): _UpperCAmelCase : Optional[int] = key.replace("module.last_layer_depth", "head.head" ) _UpperCAmelCase : int = 
value return new_state_dict def __UpperCAmelCase ( a_: str, a_: List[Any] ): # for each of the encoder blocks: for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) _UpperCAmelCase : Tuple = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""" ) _UpperCAmelCase : Union[str, Any] = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""" ) # next, add keys and values (in that order) to the state dict _UpperCAmelCase : Optional[int] = kv_weight[ : config.hidden_sizes[i], : ] _UpperCAmelCase : Dict = kv_bias[: config.hidden_sizes[i]] _UpperCAmelCase : Optional[int] = kv_weight[ config.hidden_sizes[i] :, : ] _UpperCAmelCase : Optional[Any] = kv_bias[config.hidden_sizes[i] :] def __UpperCAmelCase ( ): _UpperCAmelCase : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg" _UpperCAmelCase : List[Any] = Image.open(requests.get(a_, stream=a_ ).raw ) return image @torch.no_grad() def __UpperCAmelCase ( a_: Tuple, a_: Any, a_: Optional[Any]=False, a_: List[Any]=None ): _UpperCAmelCase : Optional[Any] = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3] ) # load image processor (only resize + rescale) _UpperCAmelCase : Dict = GLPNImageProcessor() # prepare image _UpperCAmelCase : List[Any] = prepare_img() _UpperCAmelCase : Optional[int] = image_processor(images=a_, return_tensors="pt" ).pixel_values logger.info("Converting model..." ) # load original state dict _UpperCAmelCase : Union[str, Any] = torch.load(a_, map_location=torch.device("cpu" ) ) # rename keys _UpperCAmelCase : List[str] = rename_keys(a_ ) # key and value matrices need special treatment read_in_k_v(a_, a_ ) # create HuggingFace model and load state dict _UpperCAmelCase : List[str] = GLPNForDepthEstimation(a_ ) model.load_state_dict(a_ ) model.eval() # forward pass _UpperCAmelCase : Dict = model(a_ ) _UpperCAmelCase : List[str] = outputs.predicted_depth # verify output if model_name is not None: if "nyu" in model_name: _UpperCAmelCase : Optional[Any] = torch.tensor( [[4.41_47, 4.08_73, 4.06_73], [3.78_90, 3.28_81, 3.15_25], [3.76_74, 3.54_23, 3.49_13]] ) elif "kitti" in model_name: _UpperCAmelCase : Tuple = torch.tensor( [[3.42_91, 2.78_65, 2.51_51], [3.28_41, 2.70_21, 2.35_02], [3.11_47, 2.46_25, 2.24_81]] ) else: raise ValueError(f"""Unknown model name: {model_name}""" ) _UpperCAmelCase : Dict = torch.Size([1, 480, 640] ) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3], a_, atol=1e-4 ) print("Looks ok!" ) # finally, push to hub if required if push_to_hub: logger.info("Pushing model and image processor to the hub..." ) model.push_to_hub( repo_path_or_name=Path(a_, a_ ), organization="nielsr", commit_message="Add model", use_temp_dir=a_, ) image_processor.push_to_hub( repo_path_or_name=Path(a_, a_ ), organization="nielsr", commit_message="Add image processor", use_temp_dir=a_, ) if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.' 
) parser.add_argument( '--model_name', default='glpn-kitti', type=str, help='Name of the model in case you\'re pushing to the hub.', ) __a = parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
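A sketch of sanity-checking a converted checkpoint, mirroring the verification block above; the dump folder is a placeholder and the pixel tensor is a stand-in for a processed image.

# Reload and run the converted GLPN model; ./glpn-kitti is a placeholder path.
import torch
from transformers import GLPNForDepthEstimation, GLPNImageProcessor

processor = GLPNImageProcessor.from_pretrained("./glpn-kitti")
model = GLPNForDepthEstimation.from_pretrained("./glpn-kitti").eval()

pixel_values = torch.randn(1, 3, 480, 640)  # stand-in for a processed image
with torch.no_grad():
    depth = model(pixel_values).predicted_depth
print(depth.shape)  # torch.Size([1, 480, 640]), matching the expected shape above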
17
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __a = logging.get_logger(__name__) __a = { 'facebook/nllb-moe-54B': 'https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json', } class A__ ( UpperCamelCase ): """simple docstring""" UpperCamelCase_ : Union[str, Any] = '''nllb-moe''' UpperCamelCase_ : Union[str, Any] = ['''past_key_values'''] UpperCamelCase_ : Union[str, Any] = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self : Any , lowerCAmelCase__ : Union[str, Any]=1_2_8_1_1_2 , lowerCAmelCase__ : List[str]=1_0_2_4 , lowerCAmelCase__ : Optional[int]=1_2 , lowerCAmelCase__ : Tuple=4_0_9_6 , lowerCAmelCase__ : Union[str, Any]=1_6 , lowerCAmelCase__ : Dict=1_2 , lowerCAmelCase__ : Any=4_0_9_6 , lowerCAmelCase__ : Any=1_6 , lowerCAmelCase__ : List[Any]=0.05 , lowerCAmelCase__ : List[str]=0.05 , lowerCAmelCase__ : Union[str, Any]=True , lowerCAmelCase__ : List[str]=True , lowerCAmelCase__ : List[str]="relu" , lowerCAmelCase__ : Any=1_0_2_4 , lowerCAmelCase__ : Dict=0.1 , lowerCAmelCase__ : Tuple=0.1 , lowerCAmelCase__ : int=0.0 , lowerCAmelCase__ : Optional[int]=0.02 , lowerCAmelCase__ : int=2 , lowerCAmelCase__ : Union[str, Any]=True , lowerCAmelCase__ : Tuple=False , lowerCAmelCase__ : Dict="float32" , lowerCAmelCase__ : Dict=False , lowerCAmelCase__ : Union[str, Any]=1_2_8 , lowerCAmelCase__ : Dict=6_4 , lowerCAmelCase__ : Optional[Any]=4 , lowerCAmelCase__ : Optional[Any]=4 , lowerCAmelCase__ : str=0.001 , lowerCAmelCase__ : Union[str, Any]=0.001 , lowerCAmelCase__ : Union[str, Any]="all" , lowerCAmelCase__ : Tuple=False , lowerCAmelCase__ : str=False , lowerCAmelCase__ : List[str]=1.0 , lowerCAmelCase__ : Any=0.2 , lowerCAmelCase__ : Optional[Any]=1 , lowerCAmelCase__ : Any=0 , lowerCAmelCase__ : List[Any]=2 , lowerCAmelCase__ : str=False , **lowerCAmelCase__ : int , ) -> int: """simple docstring""" _UpperCAmelCase : Tuple = vocab_size _UpperCAmelCase : Tuple = max_position_embeddings _UpperCAmelCase : Union[str, Any] = d_model _UpperCAmelCase : str = encoder_ffn_dim _UpperCAmelCase : Tuple = encoder_layers _UpperCAmelCase : Tuple = encoder_attention_heads _UpperCAmelCase : List[str] = decoder_ffn_dim _UpperCAmelCase : List[str] = decoder_layers _UpperCAmelCase : Optional[int] = decoder_attention_heads _UpperCAmelCase : int = dropout _UpperCAmelCase : List[Any] = attention_dropout _UpperCAmelCase : Any = activation_dropout _UpperCAmelCase : Union[str, Any] = activation_function _UpperCAmelCase : str = init_std _UpperCAmelCase : Union[str, Any] = encoder_layerdrop _UpperCAmelCase : List[Any] = decoder_layerdrop _UpperCAmelCase : int = use_cache _UpperCAmelCase : str = encoder_layers _UpperCAmelCase : Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True _UpperCAmelCase : List[Any] = router_z_loss_coef _UpperCAmelCase : Optional[int] = router_aux_loss_coef _UpperCAmelCase : int = decoder_sparse_step _UpperCAmelCase : Optional[Any] = encoder_sparse_step _UpperCAmelCase : List[Any] = num_experts _UpperCAmelCase : Optional[int] = expert_capacity _UpperCAmelCase : int = router_bias if router_dtype not in ["float32", "float16", "bfloat16"]: raise ValueError(F"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" ) _UpperCAmelCase : int = router_dtype _UpperCAmelCase : int = router_ignore_padding_tokens _UpperCAmelCase : Union[str, Any] = batch_prioritized_routing _UpperCAmelCase : Optional[int] = 
second_expert_policy _UpperCAmelCase : Union[str, Any] = normalize_router_prob_before_dropping _UpperCAmelCase : int = moe_eval_capacity_token_fraction _UpperCAmelCase : List[str] = moe_token_dropout _UpperCAmelCase : List[Any] = output_router_logits super().__init__( pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , decoder_start_token_id=lowerCAmelCase__ , **lowerCAmelCase__ , )
'''simple docstring''' import contextlib import csv import json import os import sqlitea import tarfile import textwrap import zipfile import pyarrow as pa import pyarrow.parquet as pq import pytest import datasets import datasets.config @pytest.fixture(scope="session" ) def __UpperCAmelCase ( ): _UpperCAmelCase : Optional[Any] = 10 _UpperCAmelCase : int = datasets.Features( { "tokens": datasets.Sequence(datasets.Value("string" ) ), "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"] ) ), "answers": datasets.Sequence( { "text": datasets.Value("string" ), "answer_start": datasets.Value("int32" ), } ), "id": datasets.Value("int64" ), } ) _UpperCAmelCase : List[str] = datasets.Dataset.from_dict( { "tokens": [["foo"] * 5] * n, "labels": [[1] * 5] * n, "answers": [{"answer_start": [97], "text": ["1976"]}] * 10, "id": list(range(a_ ) ), }, features=a_, ) return dataset @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Optional[int], a_: Dict ): _UpperCAmelCase : Any = str(tmp_path_factory.mktemp("data" ) / "file.arrow" ) dataset.map(cache_file_name=a_ ) return filename # FILE_CONTENT + files __a = '\\n Text data.\n Second line of data.' @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Dict ): _UpperCAmelCase : Dict = tmp_path_factory.mktemp("data" ) / "file.txt" _UpperCAmelCase : Tuple = FILE_CONTENT with open(a_, "w" ) as f: f.write(a_ ) return filename @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Union[str, Any] ): import bza _UpperCAmelCase : str = tmp_path_factory.mktemp("data" ) / "file.txt.bz2" _UpperCAmelCase : Optional[int] = bytes(a_, "utf-8" ) with bza.open(a_, "wb" ) as f: f.write(a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Union[str, Any] ): import gzip _UpperCAmelCase : str = str(tmp_path_factory.mktemp("data" ) / "file.txt.gz" ) _UpperCAmelCase : Any = bytes(a_, "utf-8" ) with gzip.open(a_, "wb" ) as f: f.write(a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: str ): if datasets.config.LZ4_AVAILABLE: import lza.frame _UpperCAmelCase : Optional[int] = tmp_path_factory.mktemp("data" ) / "file.txt.lz4" _UpperCAmelCase : str = bytes(a_, "utf-8" ) with lza.frame.open(a_, "wb" ) as f: f.write(a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: int, a_: Any ): if datasets.config.PY7ZR_AVAILABLE: import pyazr _UpperCAmelCase : Any = tmp_path_factory.mktemp("data" ) / "file.txt.7z" with pyazr.SevenZipFile(a_, "w" ) as archive: archive.write(a_, arcname=os.path.basename(a_ ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Any, a_: List[str] ): import tarfile _UpperCAmelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "file.txt.tar" with tarfile.TarFile(a_, "w" ) as f: f.add(a_, arcname=os.path.basename(a_ ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: int ): import lzma _UpperCAmelCase : List[Any] = tmp_path_factory.mktemp("data" ) / "file.txt.xz" _UpperCAmelCase : List[str] = bytes(a_, "utf-8" ) with lzma.open(a_, "wb" ) as f: f.write(a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Dict, a_: Tuple ): import zipfile _UpperCAmelCase : Tuple = tmp_path_factory.mktemp("data" ) / "file.txt.zip" with zipfile.ZipFile(a_, "w" ) as f: f.write(a_, arcname=os.path.basename(a_ ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Optional[int] ): if datasets.config.ZSTANDARD_AVAILABLE: import zstandard as zstd _UpperCAmelCase : 
Optional[int] = tmp_path_factory.mktemp("data" ) / "file.txt.zst" _UpperCAmelCase : int = bytes(a_, "utf-8" ) with zstd.open(a_, "wb" ) as f: f.write(a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Optional[int] ): _UpperCAmelCase : List[str] = tmp_path_factory.mktemp("data" ) / "file.xml" _UpperCAmelCase : Tuple = textwrap.dedent( "\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>" ) with open(a_, "w" ) as f: f.write(a_ ) return filename __a = [ {'col_1': '0', 'col_2': 0, 'col_3': 0.0}, {'col_1': '1', 'col_2': 1, 'col_3': 1.0}, {'col_1': '2', 'col_2': 2, 'col_3': 2.0}, {'col_1': '3', 'col_2': 3, 'col_3': 3.0}, ] __a = [ {'col_1': '4', 'col_2': 4, 'col_3': 4.0}, {'col_1': '5', 'col_2': 5, 'col_3': 5.0}, ] __a = { 'col_1': ['0', '1', '2', '3'], 'col_2': [0, 1, 2, 3], 'col_3': [0.0, 1.0, 2.0, 3.0], } __a = [ {'col_3': 0.0, 'col_1': '0', 'col_2': 0}, {'col_3': 1.0, 'col_1': '1', 'col_2': 1}, ] __a = [ {'col_1': 's0', 'col_2': 0, 'col_3': 0.0}, {'col_1': 's1', 'col_2': 1, 'col_3': 1.0}, {'col_1': 's2', 'col_2': 2, 'col_3': 2.0}, {'col_1': 's3', 'col_2': 3, 'col_3': 3.0}, ] @pytest.fixture(scope="session" ) def __UpperCAmelCase ( ): return DATA_DICT_OF_LISTS @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Union[str, Any] ): _UpperCAmelCase : str = datasets.Dataset.from_dict(a_ ) _UpperCAmelCase : Optional[int] = str(tmp_path_factory.mktemp("data" ) / "dataset.arrow" ) dataset.map(cache_file_name=a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: str ): _UpperCAmelCase : int = str(tmp_path_factory.mktemp("data" ) / "dataset.sqlite" ) with contextlib.closing(sqlitea.connect(a_ ) ) as con: _UpperCAmelCase : List[Any] = con.cursor() cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)" ) for item in DATA: cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values() ) ) con.commit() return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Any ): _UpperCAmelCase : Dict = str(tmp_path_factory.mktemp("data" ) / "dataset.csv" ) with open(a_, "w", newline="" ) as f: _UpperCAmelCase : Dict = csv.DictWriter(a_, fieldnames=["col_1", "col_2", "col_3"] ) writer.writeheader() for item in DATA: writer.writerow(a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Union[str, Any] ): _UpperCAmelCase : Union[str, Any] = str(tmp_path_factory.mktemp("data" ) / "dataset2.csv" ) with open(a_, "w", newline="" ) as f: _UpperCAmelCase : Optional[int] = csv.DictWriter(a_, fieldnames=["col_1", "col_2", "col_3"] ) writer.writeheader() for item in DATA: writer.writerow(a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: str, a_: str ): import bza _UpperCAmelCase : str = tmp_path_factory.mktemp("data" ) / "dataset.csv.bz2" with open(a_, "rb" ) as f: _UpperCAmelCase : Any = 
f.read() # data = bytes(FILE_CONTENT, "utf-8") with bza.open(a_, "wb" ) as f: f.write(a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Optional[int], a_: Dict, a_: Optional[int] ): _UpperCAmelCase : List[Any] = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip" with zipfile.ZipFile(a_, "w" ) as f: f.write(a_, arcname=os.path.basename(a_ ) ) f.write(a_, arcname=os.path.basename(a_ ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: List[str], a_: Union[str, Any], a_: int ): _UpperCAmelCase : int = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip" with zipfile.ZipFile(a_, "w" ) as f: f.write(a_, arcname=os.path.basename(csv_path.replace(".csv", ".CSV" ) ) ) f.write(a_, arcname=os.path.basename(csva_path.replace(".csv", ".CSV" ) ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Any, a_: Union[str, Any], a_: Tuple ): _UpperCAmelCase : Any = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.csv.zip" with zipfile.ZipFile(a_, "w" ) as f: f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) ) ) f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Tuple ): _UpperCAmelCase : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.parquet" ) _UpperCAmelCase : Dict = pa.schema( { "col_1": pa.string(), "col_2": pa.intaa(), "col_3": pa.floataa(), } ) with open(a_, "wb" ) as f: _UpperCAmelCase : Tuple = pq.ParquetWriter(a_, schema=a_ ) _UpperCAmelCase : Tuple = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(a_ ) )] for k in DATA[0]}, schema=a_ ) writer.write_table(a_ ) writer.close() return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Any ): _UpperCAmelCase : Union[str, Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.json" ) _UpperCAmelCase : str = {"data": DATA} with open(a_, "w" ) as f: json.dump(a_, a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Union[str, Any] ): _UpperCAmelCase : Optional[int] = str(tmp_path_factory.mktemp("data" ) / "dataset.json" ) _UpperCAmelCase : Dict = {"data": DATA_DICT_OF_LISTS} with open(a_, "w" ) as f: json.dump(a_, a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: int ): _UpperCAmelCase : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl" ) with open(a_, "w" ) as f: for item in DATA: f.write(json.dumps(a_ ) + "\n" ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Tuple ): _UpperCAmelCase : Any = str(tmp_path_factory.mktemp("data" ) / "dataset2.jsonl" ) with open(a_, "w" ) as f: for item in DATA: f.write(json.dumps(a_ ) + "\n" ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Any ): _UpperCAmelCase : int = str(tmp_path_factory.mktemp("data" ) / "dataset_312.jsonl" ) with open(a_, "w" ) as f: for item in DATA_312: f.write(json.dumps(a_ ) + "\n" ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Optional[Any] ): _UpperCAmelCase : Optional[int] = str(tmp_path_factory.mktemp("data" ) / "dataset-str.jsonl" ) with open(a_, "w" ) as f: for item in DATA_STR: f.write(json.dumps(a_ ) + "\n" ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Union[str, Any], a_: Any ): import gzip _UpperCAmelCase : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.txt.gz" ) with open(a_, "rb" ) as orig_file: with gzip.open(a_, "wb" ) as zipped_file: zipped_file.writelines(a_ ) 
return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Optional[Any], a_: Tuple ): import gzip _UpperCAmelCase : List[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl.gz" ) with open(a_, "rb" ) as orig_file: with gzip.open(a_, "wb" ) as zipped_file: zipped_file.writelines(a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Dict, a_: List[Any], a_: Union[str, Any] ): _UpperCAmelCase : Tuple = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.zip" with zipfile.ZipFile(a_, "w" ) as f: f.write(a_, arcname=os.path.basename(a_ ) ) f.write(a_, arcname=os.path.basename(a_ ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Union[str, Any], a_: Optional[int], a_: Optional[Any], a_: Dict ): _UpperCAmelCase : Dict = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.zip" with zipfile.ZipFile(a_, "w" ) as f: f.write(a_, arcname=os.path.join("nested", os.path.basename(a_ ) ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: List[Any], a_: Optional[int], a_: List[str] ): _UpperCAmelCase : Dict = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.jsonl.zip" with zipfile.ZipFile(a_, "w" ) as f: f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) ) ) f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: List[Any], a_: List[Any], a_: str ): _UpperCAmelCase : Optional[Any] = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.tar" with tarfile.TarFile(a_, "w" ) as f: f.add(a_, arcname=os.path.basename(a_ ) ) f.add(a_, arcname=os.path.basename(a_ ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: List[str], a_: List[Any], a_: Tuple, a_: Dict ): _UpperCAmelCase : List[Any] = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.tar" with tarfile.TarFile(a_, "w" ) as f: f.add(a_, arcname=os.path.join("nested", os.path.basename(a_ ) ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: List[str] ): _UpperCAmelCase : List[str] = ["0", "1", "2", "3"] _UpperCAmelCase : Tuple = str(tmp_path_factory.mktemp("data" ) / "dataset.txt" ) with open(a_, "w" ) as f: for item in data: f.write(item + "\n" ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Union[str, Any] ): _UpperCAmelCase : Dict = ["0", "1", "2", "3"] _UpperCAmelCase : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset2.txt" ) with open(a_, "w" ) as f: for item in data: f.write(item + "\n" ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Any ): _UpperCAmelCase : int = ["0", "1", "2", "3"] _UpperCAmelCase : str = tmp_path_factory.mktemp("data" ) / "dataset.abc" with open(a_, "w" ) as f: for item in data: f.write(item + "\n" ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Optional[Any], a_: Any, a_: Union[str, Any] ): _UpperCAmelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "dataset.text.zip" with zipfile.ZipFile(a_, "w" ) as f: f.write(a_, arcname=os.path.basename(a_ ) ) f.write(a_, arcname=os.path.basename(a_ ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Optional[int], a_: List[Any], a_: List[Any] ): _UpperCAmelCase : List[Any] = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.text.zip" with zipfile.ZipFile(a_, "w" ) as f: f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) ) ) f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) 
) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Any, a_: str, a_: Tuple ): _UpperCAmelCase : List[Any] = tmp_path_factory.mktemp("data" ) / "dataset.ext.zip" with zipfile.ZipFile(a_, "w" ) as f: f.write(a_, arcname=os.path.basename("unsupported.ext" ) ) f.write(a_, arcname=os.path.basename("unsupported_2.ext" ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Optional[Any] ): _UpperCAmelCase : List[str] = "\n".join(["First", "Second\u2029with Unicode new line", "Third"] ) _UpperCAmelCase : str = str(tmp_path_factory.mktemp("data" ) / "dataset_with_unicode_new_lines.txt" ) with open(a_, "w", encoding="utf-8" ) as f: f.write(a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( ): return os.path.join("tests", "features", "data", "test_image_rgb.jpg" ) @pytest.fixture(scope="session" ) def __UpperCAmelCase ( ): return os.path.join("tests", "features", "data", "test_audio_44100.wav" ) @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: int, a_: Optional[Any] ): _UpperCAmelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "dataset.img.zip" with zipfile.ZipFile(a_, "w" ) as f: f.write(a_, arcname=os.path.basename(a_ ) ) f.write(a_, arcname=os.path.basename(a_ ).replace(".jpg", "2.jpg" ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Tuple ): _UpperCAmelCase : Optional[Any] = tmp_path_factory.mktemp("data_dir" ) (data_dir / "subdir").mkdir() with open(data_dir / "subdir" / "train.txt", "w" ) as f: f.write("foo\n" * 10 ) with open(data_dir / "subdir" / "test.txt", "w" ) as f: f.write("bar\n" * 10 ) # hidden file with open(data_dir / "subdir" / ".test.txt", "w" ) as f: f.write("bar\n" * 10 ) # hidden directory (data_dir / ".subdir").mkdir() with open(data_dir / ".subdir" / "train.txt", "w" ) as f: f.write("foo\n" * 10 ) with open(data_dir / ".subdir" / "test.txt", "w" ) as f: f.write("bar\n" * 10 ) return data_dir
"""Convert a DALL-E image codebook checkpoint to the FLAVA image codebook format."""
import argparse
import os

import torch

from transformers import FlavaImageCodebook, FlavaImageCodebookConfig


def rreplace(s, old, new, occurrence):
    # Replace the last `occurrence` matches of `old` in `s` with `new`.
    li = s.rsplit(old, occurrence)
    return new.join(li)


def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    upgrade = {}

    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade


@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")

    args = parser.parse_args()
    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
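# Usage sketch (editor's addition; the script file name and the paths are
# illustrative, only the flag names come from the argparse setup above):
#
#     python convert_dalle_to_flava_codebook.py \
#         --checkpoint_path ./encoder.pkl \
#         --pytorch_dump_folder_path ./flava-codebook \
#         --config_path ./config.json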
"""LayoutLMv2 lazy import structure."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
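# Editor's note: the _LazyModule registered in sys.modules defers the heavy
# torch / vision imports until an attribute is first accessed, so e.g.
# `from transformers.models.layoutlmv2 import LayoutLMv2Config` stays cheap
# even when the optional backends are installed.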
'''simple docstring''' import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: __a = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class A__ ( unittest.TestCase ): """simple docstring""" def __init__( self : int , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[Any]=7 , lowerCAmelCase__ : int=3 , lowerCAmelCase__ : List[Any]=1_8 , lowerCAmelCase__ : str=3_0 , lowerCAmelCase__ : str=4_0_0 , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : str=True , lowerCAmelCase__ : List[Any]=None , ) -> List[Any]: """simple docstring""" _UpperCAmelCase : List[Any] = size if size is not None else {"height": 2_0, "width": 2_0} _UpperCAmelCase : Optional[Any] = parent _UpperCAmelCase : Tuple = batch_size _UpperCAmelCase : str = num_channels _UpperCAmelCase : Optional[Any] = image_size _UpperCAmelCase : Dict = min_resolution _UpperCAmelCase : str = max_resolution _UpperCAmelCase : List[Any] = size _UpperCAmelCase : Union[str, Any] = do_normalize _UpperCAmelCase : Optional[Any] = do_convert_rgb _UpperCAmelCase : str = [5_1_2, 1_0_2_4, 2_0_4_8, 4_0_9_6] _UpperCAmelCase : str = patch_size if patch_size is not None else {"height": 1_6, "width": 1_6} def _lowerCAmelCase ( self : List[str] ) -> int: """simple docstring""" return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def _lowerCAmelCase ( self : Any ) -> str: """simple docstring""" _UpperCAmelCase : Dict = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg" _UpperCAmelCase : Optional[Any] = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw ).convert("RGB" ) return raw_image @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , ) @require_torch @require_vision class A__ ( UpperCamelCase , unittest.TestCase ): """simple docstring""" UpperCamelCase_ : Any = PixaStructImageProcessor if is_vision_available() else None def _lowerCAmelCase ( self : Any ) -> Optional[int]: """simple docstring""" _UpperCAmelCase : Tuple = PixaStructImageProcessingTester(self ) @property def _lowerCAmelCase ( self : Tuple ) -> int: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def _lowerCAmelCase ( self : Any ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase : str = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCAmelCase__ , "do_normalize" ) ) self.assertTrue(hasattr(lowerCAmelCase__ , "do_convert_rgb" ) ) def _lowerCAmelCase ( self : Optional[Any] ) -> Dict: """simple docstring""" _UpperCAmelCase : Optional[Any] = self.image_processor_tester.prepare_dummy_image() _UpperCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict ) _UpperCAmelCase : str = 2_0_4_8 _UpperCAmelCase : Any = image_processor(lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__ ) self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) ) def _lowerCAmelCase ( self : Dict 
) -> int: """simple docstring""" _UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , Image.Image ) # Test not batched input _UpperCAmelCase : List[str] = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _UpperCAmelCase : Union[str, Any] = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _UpperCAmelCase : str = image_processor( lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def _lowerCAmelCase ( self : Optional[int] ) -> List[str]: """simple docstring""" _UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , Image.Image ) # Test not batched input _UpperCAmelCase : Union[str, Any] = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 _UpperCAmelCase : str = True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(lowerCAmelCase__ ): _UpperCAmelCase : str = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches _UpperCAmelCase : Any = "Hello" _UpperCAmelCase : Optional[int] = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__ , header_text=lowerCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _UpperCAmelCase : List[Any] = image_processor( lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__ , header_text=lowerCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def _lowerCAmelCase ( self : List[str] ) -> List[Any]: """simple docstring""" _UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , np.ndarray ) _UpperCAmelCase : Any = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _UpperCAmelCase : int = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _UpperCAmelCase : Union[str, Any] = 
image_processor( lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def _lowerCAmelCase ( self : int ) -> str: """simple docstring""" _UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , torch.Tensor ) # Test not batched input _UpperCAmelCase : List[str] = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _UpperCAmelCase : Union[str, Any] = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _UpperCAmelCase : str = image_processor( lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , ) @require_torch @require_vision class A__ ( UpperCamelCase , unittest.TestCase ): """simple docstring""" UpperCamelCase_ : List[Any] = PixaStructImageProcessor if is_vision_available() else None def _lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase : Any = PixaStructImageProcessingTester(self , num_channels=4 ) _UpperCAmelCase : List[Any] = 3 @property def _lowerCAmelCase ( self : Union[str, Any] ) -> Tuple: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def _lowerCAmelCase ( self : Dict ) -> Any: """simple docstring""" _UpperCAmelCase : str = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCAmelCase__ , "do_normalize" ) ) self.assertTrue(hasattr(lowerCAmelCase__ , "do_convert_rgb" ) ) def _lowerCAmelCase ( self : int ) -> List[str]: """simple docstring""" _UpperCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , Image.Image ) # Test not batched input _UpperCAmelCase : str = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _UpperCAmelCase : Any = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _UpperCAmelCase : Tuple = image_processor( lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
"""Build a markdown index of all Python/notebook files in a directory tree."""
import os
from collections.abc import Iterator


def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # Prune the "scripts" directory and hidden/private directories in place.
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i: int) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
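# Example output (editor's addition): for a tree containing
# graphs/minimum_cut.py, print_directory_md(".") emits markdown along the lines of
#
#     ## Graphs
#       * [Minimum Cut](graphs/minimum_cut.py)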
"""Time Series Transformer model configuration."""
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}


class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
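# Worked example (editor's addition): with the defaults above, cardinality
# falls back to [0], so embedding_dimension becomes [min(50, (0 + 1) // 2)] == [0]
# and _number_of_features is 0 + 0 + 0 + 0 + 1 * 2 = 2; feature_size is then
# input_size * len(lags_sequence) + 2 = 1 * 7 + 2 = 9.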
"""FizzBuzz implementation with input validation."""


def fizz_buzz(number: int, iterations: int) -> str:
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
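# Worked example (editor's addition): fizz_buzz(1, 7) returns
# "1 2 Fizz 4 Buzz Fizz 7 " -- every token, including the last,
# is followed by a single space.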
"""Base64 encoding and decoding helpers."""
import base64


def base64_encode(string: str) -> bytes:
    # Encode the UTF-8 bytes of the input string.
    return base64.b64encode(string.encode("utf-8"))


def base64_decode(encoded_bytes: bytes) -> str:
    # Decode Base64 back to a UTF-8 string.
    return base64.b64decode(encoded_bytes).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base64_encode(test)
    print(encoded)

    decoded = base64_decode(encoded)
    print(decoded)
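# Round-trip check (editor's addition; it assumes the garbled names in the
# source were base64 / b64encode / b64decode rather than another base-N variant):
#
#     >>> base64_encode("Hello World!")
#     b'SGVsbG8gV29ybGQh'
#     >>> base64_decode(base64_encode("Hello World!"))
#     'Hello World!'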
"""Adler-32 checksum implementation."""

MOD_ADLER = 65521


def adler32(plain_text: str) -> int:
    # Running sums a and b per the Adler-32 definition (RFC 1950).
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
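if __name__ == "__main__":
    # Sanity check (editor's addition): for ASCII input the result agrees with
    # Python's built-in implementation of the same checksum in zlib.
    import zlib

    assert adler32("Algorithms") == zlib.adler32(b"Algorithms") == 363791387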
'''simple docstring''' from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class A__ : """simple docstring""" UpperCamelCase_ : Any = XGLMConfig UpperCamelCase_ : Union[str, Any] = {} UpperCamelCase_ : Dict = '''gelu''' def __init__( self : Optional[int] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[Any]=1_4 , lowerCAmelCase__ : Any=7 , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : List[str]=True , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : List[str]=9_9 , lowerCAmelCase__ : Any=3_2 , lowerCAmelCase__ : Optional[int]=2 , lowerCAmelCase__ : List[Any]=4 , lowerCAmelCase__ : Any=3_7 , lowerCAmelCase__ : List[Any]="gelu" , lowerCAmelCase__ : List[Any]=0.1 , lowerCAmelCase__ : Dict=0.1 , lowerCAmelCase__ : Optional[int]=5_1_2 , lowerCAmelCase__ : Optional[Any]=0.02 , ) -> int: """simple docstring""" _UpperCAmelCase : Optional[Any] = parent _UpperCAmelCase : str = batch_size _UpperCAmelCase : str = seq_length _UpperCAmelCase : int = is_training _UpperCAmelCase : List[Any] = use_input_mask _UpperCAmelCase : Optional[int] = use_labels _UpperCAmelCase : str = vocab_size _UpperCAmelCase : int = d_model _UpperCAmelCase : Tuple = num_hidden_layers _UpperCAmelCase : Tuple = num_attention_heads _UpperCAmelCase : Tuple = ffn_dim _UpperCAmelCase : Any = activation_function _UpperCAmelCase : Union[str, Any] = activation_dropout _UpperCAmelCase : Union[str, Any] = attention_dropout _UpperCAmelCase : Any = max_position_embeddings _UpperCAmelCase : int = initializer_range _UpperCAmelCase : Any = None _UpperCAmelCase : int = 0 _UpperCAmelCase : Union[str, Any] = 2 _UpperCAmelCase : Tuple = 1 def _lowerCAmelCase ( self : Optional[Any] ) -> List[Any]: """simple docstring""" return XGLMConfig.from_pretrained("facebook/xglm-564M" ) def _lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" _UpperCAmelCase : int = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 ) _UpperCAmelCase : Any = None if self.use_input_mask: _UpperCAmelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase : Optional[Any] = self.get_config() _UpperCAmelCase : Dict = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, ) def _lowerCAmelCase ( self : int ) -> Any: """simple docstring""" return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=lowerCAmelCase__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=lowerCAmelCase__ , ) def _lowerCAmelCase ( self : 
Tuple ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) : List[Any] = config_and_inputs _UpperCAmelCase : Optional[int] = { "input_ids": input_ids, "head_mask": head_mask, } return config, inputs_dict @require_tf class A__ ( UpperCamelCase , UpperCamelCase , unittest.TestCase ): """simple docstring""" UpperCamelCase_ : str = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () UpperCamelCase_ : Any = (TFXGLMForCausalLM,) if is_tf_available() else () UpperCamelCase_ : Tuple = ( {'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {} ) UpperCamelCase_ : Dict = False UpperCamelCase_ : List[Any] = False UpperCamelCase_ : Tuple = False def _lowerCAmelCase ( self : List[str] ) -> int: """simple docstring""" _UpperCAmelCase : Dict = TFXGLMModelTester(self ) _UpperCAmelCase : Dict = ConfigTester(self , config_class=lowerCAmelCase__ , n_embd=3_7 ) def _lowerCAmelCase ( self : List[str] ) -> Dict: """simple docstring""" self.config_tester.run_common_tests() @slow def _lowerCAmelCase ( self : List[str] ) -> Union[str, Any]: """simple docstring""" for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase : Optional[int] = TFXGLMModel.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor." ) def _lowerCAmelCase ( self : Union[str, Any] ) -> int: """simple docstring""" super().test_resize_token_embeddings() @require_tf class A__ ( unittest.TestCase ): """simple docstring""" @slow def _lowerCAmelCase ( self : Optional[int] , lowerCAmelCase__ : Optional[Any]=True ) -> Tuple: """simple docstring""" _UpperCAmelCase : Optional[int] = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" ) _UpperCAmelCase : Any = tf.convert_to_tensor([[2, 2_6_8, 9_8_6_5]] , dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off _UpperCAmelCase : int = [2, 2_6_8, 9_8_6_5, 6_7, 1_1, 1_9_8_8, 5_7_2_5_2, 9_8_6_5, 5, 9_8_4, 6_7, 1_9_8_8, 2_1_3_8_3_8, 1_6_5_8, 5_3, 7_0_4_4_6, 3_3, 6_6_5_7, 2_7_8, 1_5_8_1] # fmt: on _UpperCAmelCase : Dict = model.generate(lowerCAmelCase__ , do_sample=lowerCAmelCase__ , num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , lowerCAmelCase__ ) @slow def _lowerCAmelCase ( self : List[Any] ) -> str: """simple docstring""" _UpperCAmelCase : List[str] = XGLMTokenizer.from_pretrained("facebook/xglm-564M" ) _UpperCAmelCase : Optional[Any] = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" ) tf.random.set_seed(0 ) _UpperCAmelCase : Any = tokenizer("Today is a nice day and" , return_tensors="tf" ) _UpperCAmelCase : int = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(":/CPU:0" ): _UpperCAmelCase : List[Any] = model.generate(lowerCAmelCase__ , do_sample=lowerCAmelCase__ , seed=[7, 0] ) _UpperCAmelCase : Any = tokenizer.decode(output_ids[0] , skip_special_tokens=lowerCAmelCase__ ) _UpperCAmelCase : List[Any] = ( "Today is a nice day and warm evening here over Southern Alberta!! 
Today when they closed schools due" ) self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ ) @slow def _lowerCAmelCase ( self : Optional[int] ) -> str: """simple docstring""" _UpperCAmelCase : Optional[Any] = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" ) _UpperCAmelCase : List[Any] = XGLMTokenizer.from_pretrained("facebook/xglm-564M" ) _UpperCAmelCase : Optional[int] = "left" # use different length sentences to test batching _UpperCAmelCase : Tuple = [ "This is an extremelly long sentence that only exists to test the ability of the model to cope with " "left-padding, such as in batched generation. The output for the sequence below should be the same " "regardless of whether left padding is applied or not. When", "Hello, my dog is a little", ] _UpperCAmelCase : Dict = tokenizer(lowerCAmelCase__ , return_tensors="tf" , padding=lowerCAmelCase__ ) _UpperCAmelCase : List[Any] = inputs["input_ids"] _UpperCAmelCase : Dict = model.generate(input_ids=lowerCAmelCase__ , attention_mask=inputs["attention_mask"] , max_new_tokens=1_2 ) _UpperCAmelCase : int = tokenizer(sentences[0] , return_tensors="tf" ).input_ids _UpperCAmelCase : Dict = model.generate(input_ids=lowerCAmelCase__ , max_new_tokens=1_2 ) _UpperCAmelCase : Optional[int] = tokenizer(sentences[1] , return_tensors="tf" ).input_ids _UpperCAmelCase : List[Any] = model.generate(input_ids=lowerCAmelCase__ , max_new_tokens=1_2 ) _UpperCAmelCase : List[str] = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ ) _UpperCAmelCase : Tuple = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCAmelCase__ ) _UpperCAmelCase : List[str] = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCAmelCase__ ) _UpperCAmelCase : Union[str, Any] = [ "This is an extremelly long sentence that only exists to test the ability of the model to cope with " "left-padding, such as in batched generation. The output for the sequence below should be the same " "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be " "a single", "Hello, my dog is a little bit of a shy one, but he is very friendly", ] self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , [non_padded_sentence, padded_sentence] )
"""Minimum cut of a flow network via Ford-Fulkerson with BFS augmenting paths."""

test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent):
    # Return True if t is reachable from s in the residual graph; record the path in parent.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    parent = [-1] * len(graph)
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record the original capacities (copy).
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path.
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    # Edges saturated by the maximum flow form the minimum cut.
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res


if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
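# Editor's note: for test_graph with source=0 and sink=5 the call above prints
# [(1, 3), (4, 3), (4, 5)]; those saturated edges have capacities 12 + 7 + 4 = 23,
# which equals the maximum flow of this classic example network.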
'''simple docstring''' import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( "files", [ ["full:README.md", "dataset_infos.json"], ["empty:README.md", "dataset_infos.json"], ["dataset_infos.json"], ["full:README.md"], ], ) def __UpperCAmelCase ( a_: Tuple, a_: Any ): _UpperCAmelCase : Union[str, Any] = tmp_path_factory.mktemp("dset_infos_dir" ) if "full:README.md" in files: with open(dataset_infos_dir / "README.md", "w" ) as f: f.write("---\ndataset_info:\n dataset_size: 42\n---" ) if "empty:README.md" in files: with open(dataset_infos_dir / "README.md", "w" ) as f: f.write("" ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / "dataset_infos.json", "w" ) as f: f.write("{\"default\": {\"dataset_size\": 42}}" ) _UpperCAmelCase : List[str] = DatasetInfosDict.from_directory(a_ ) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( "dataset_info", [ DatasetInfo(), DatasetInfo( description="foo", features=Features({"a": Value("int32" )} ), builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train"}], download_size=42, ), ], ) def __UpperCAmelCase ( a_: Union[str, Any], a_: DatasetInfo ): _UpperCAmelCase : Tuple = str(a_ ) dataset_info.write_to_directory(a_ ) _UpperCAmelCase : Any = DatasetInfo.from_directory(a_ ) assert dataset_info == reloaded assert os.path.exists(os.path.join(a_, "dataset_info.json" ) ) def __UpperCAmelCase ( ): _UpperCAmelCase : Optional[int] = DatasetInfo( description="foo", citation="bar", homepage="https://foo.bar", license="CC0", features=Features({"a": Value("int32" )} ), post_processed={}, supervised_keys=(), task_templates=[], builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train", "num_examples": 42}], download_checksums={}, download_size=1_337, post_processing_size=442, dataset_size=1_234, size_in_bytes=1_337 + 442 + 1_234, ) _UpperCAmelCase : Tuple = dataset_info._to_yaml_dict() assert sorted(a_ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str) ) _UpperCAmelCase : List[Any] = yaml.safe_dump(a_ ) _UpperCAmelCase : Optional[int] = yaml.safe_load(a_ ) assert dataset_info_yaml_dict == reloaded def __UpperCAmelCase ( ): _UpperCAmelCase : str = DatasetInfo() _UpperCAmelCase : List[str] = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( "dataset_infos_dict", [ DatasetInfosDict(), DatasetInfosDict({"default": DatasetInfo()} ), DatasetInfosDict({"my_config_name": DatasetInfo()} ), DatasetInfosDict( { "default": DatasetInfo( description="foo", features=Features({"a": Value("int32" )} ), builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train"}], download_size=42, ) } ), DatasetInfosDict( { "v1": DatasetInfo(dataset_size=42 ), "v2": DatasetInfo(dataset_size=1_337 ), } ), ], ) def __UpperCAmelCase ( a_: str, a_: DatasetInfosDict ): _UpperCAmelCase : Union[str, Any] = str(a_ ) dataset_infos_dict.write_to_directory(a_ ) _UpperCAmelCase : Union[str, Any] = DatasetInfosDict.from_directory(a_ ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in dataset_infos_dict.items(): _UpperCAmelCase : Optional[int] = 
config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml _UpperCAmelCase : List[str] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(a_, "README.md" ) )
17
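The tests above exercise the `datasets` info round trip; a minimal usage sketch, assuming the public `DatasetInfo.write_to_directory` / `DatasetInfo.from_directory` API that those tests call:

import tempfile

from datasets import DatasetInfo

info = DatasetInfo(description="demo", dataset_size=42)
with tempfile.TemporaryDirectory() as tmp_dir:
    info.write_to_directory(tmp_dir)  # writes dataset_info.json into tmp_dir
    reloaded = DatasetInfo.from_directory(tmp_dir)
assert reloaded.dataset_size == 42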
1
'''simple docstring''' from scipy.stats import spearmanr import datasets __a = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n' __a = '\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {\'spearmanr\': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results[\'spearmanr\'])\n -0.7\n >>> print(round(results[\'spearmanr_pvalue\'], 2))\n 0.19\n' __a = R'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A__ ( datasets.Metric ): """simple docstring""" def _lowerCAmelCase ( self : Dict ) -> Any: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("float" ), "references": datasets.Value("float" ), } ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , ) def _lowerCAmelCase ( self : str , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Tuple=False ) -> Tuple: """simple docstring""" _UpperCAmelCase : List[str] = spearmanr(lowerCAmelCase__ , lowerCAmelCase__ ) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
17
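Since the metric above is a thin wrapper around `scipy.stats.spearmanr`, the equivalent direct call looks like this (values taken from the docstring examples in the row):

from scipy.stats import spearmanr

references = [1, 2, 3, 4, 5]
predictions = [10, 9, 2.5, 6, 4]
rho, p_value = spearmanr(references, predictions)
print(round(rho, 2))      # -0.7
print(round(p_value, 2))  # 0.19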
'''simple docstring''' from math import factorial def __UpperCAmelCase ( a_: int = 100 ): return sum(map(a_, str(factorial(a_ ) ) ) ) if __name__ == "__main__": print(solution(int(input('Enter the Number: ').strip())))
17
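The row above is Project Euler problem 20 (the digit sum of 100!) with obfuscated names; a readable equivalent:

from math import factorial

def digit_sum_of_factorial(n: int) -> int:
    # Sum the decimal digits of n!
    return sum(int(digit) for digit in str(factorial(n)))

print(digit_sum_of_factorial(100))  # 648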
1
'''simple docstring''' from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal __a = logging.get_logger(__name__) __a = TypeVar('DatasetType', Dataset, IterableDataset) def __UpperCAmelCase ( a_: List[DatasetType], a_: Optional[List[float]] = None, a_: Optional[int] = None, a_: Optional[DatasetInfo] = None, a_: Optional[NamedSplit] = None, a_: Literal["first_exhausted", "all_exhausted"] = "first_exhausted", ): from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError("Unable to interleave an empty list of datasets." ) for i, dataset in enumerate(a_ ): if not isinstance(a_, (Dataset, IterableDataset) ): if isinstance(a_, (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """ "is an empty dataset dictionary." ) raise ValueError( f"""Dataset at position {i} has at least one split: {list(a_ )}\n""" f"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(a_ ) )}']""" ) raise ValueError( f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(a_ ).__name__}.""" ) if i == 0: _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = ( (Dataset, IterableDataset) if isinstance(a_, a_ ) else (IterableDataset, Dataset) ) elif not isinstance(a_, a_ ): raise ValueError( f"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(f"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" ) if dataset_type is Dataset: return _interleave_map_style_datasets( a_, a_, a_, info=a_, split=a_, stopping_strategy=a_ ) else: return _interleave_iterable_datasets( a_, a_, a_, info=a_, split=a_, stopping_strategy=a_ ) def __UpperCAmelCase ( a_: List[DatasetType], a_: Optional[DatasetInfo] = None, a_: Optional[NamedSplit] = None, a_: int = 0, ): if not dsets: raise ValueError("Unable to concatenate an empty list of datasets." ) for i, dataset in enumerate(a_ ): if not isinstance(a_, (Dataset, IterableDataset) ): if isinstance(a_, (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """ "is an empty dataset dictionary." 
) raise ValueError( f"""Dataset at position {i} has at least one split: {list(a_ )}\n""" f"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(a_ ) )}']""" ) raise ValueError( f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(a_ ).__name__}.""" ) if i == 0: _UpperCAmelCase , _UpperCAmelCase : Optional[int] = ( (Dataset, IterableDataset) if isinstance(a_, a_ ) else (IterableDataset, Dataset) ) elif not isinstance(a_, a_ ): raise ValueError( f"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" ) if dataset_type is Dataset: return _concatenate_map_style_datasets(a_, info=a_, split=a_, axis=a_ ) else: return _concatenate_iterable_datasets(a_, info=a_, split=a_, axis=a_ )
17
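The two stopping strategies validated above can be illustrated with a plain-Python round-robin toy (not the library implementation; names and semantics are deliberately simplified): "first_exhausted" stops at the shortest dataset, while "all_exhausted" keeps cycling the shorter ones until the longest is consumed.

from itertools import cycle

def toy_interleave(rows_per_dataset, stopping_strategy="first_exhausted"):
    if stopping_strategy == "first_exhausted":
        n = min(len(rows) for rows in rows_per_dataset)
        sources = [iter(rows) for rows in rows_per_dataset]
    elif stopping_strategy == "all_exhausted":
        n = max(len(rows) for rows in rows_per_dataset)
        sources = [cycle(rows) for rows in rows_per_dataset]  # oversample short datasets
    else:
        raise ValueError(f"{stopping_strategy} is not supported.")
    return [next(src) for _ in range(n) for src in sources]

print(toy_interleave([[0, 1, 2], ["a", "b"]]))                   # [0, 'a', 1, 'b']
print(toy_interleave([[0, 1, 2], ["a", "b"]], "all_exhausted"))  # [0, 'a', 1, 'b', 2, 'a']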
'''simple docstring''' from __future__ import annotations from collections.abc import Iterable, Iterator from dataclasses import dataclass __a = (3, 9, -11, 0, 7, 5, 1, -1) __a = (4, 6, 2, 0, 8, 10, 3, -2) @dataclass class A__ : """simple docstring""" UpperCamelCase_ : int UpperCamelCase_ : Node | None class A__ : """simple docstring""" def __init__( self : Dict , lowerCAmelCase__ : Iterable[int] ) -> None: """simple docstring""" _UpperCAmelCase : Node | None = None for i in sorted(lowerCAmelCase__ , reverse=lowerCAmelCase__ ): _UpperCAmelCase : str = Node(lowerCAmelCase__ , self.head ) def __iter__( self : int ) -> Iterator[int]: """simple docstring""" _UpperCAmelCase : List[Any] = self.head while node: yield node.data _UpperCAmelCase : List[str] = node.next_node def __len__( self : Any ) -> int: """simple docstring""" return sum(1 for _ in self ) def __str__( self : Union[str, Any] ) -> str: """simple docstring""" return " -> ".join([str(lowerCAmelCase__ ) for node in self] ) def __UpperCAmelCase ( a_: SortedLinkedList, a_: SortedLinkedList ): return SortedLinkedList(list(a_ ) + list(a_ ) ) if __name__ == "__main__": import doctest doctest.testmod() __a = SortedLinkedList print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
17
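A readable sketch of the same sorted-linked-list idea as the row above (prepend in descending order so the list comes out ascending, then merge by rebuilding); identifiers are my own:

from __future__ import annotations

from dataclasses import dataclass

@dataclass
class Node:
    data: int
    next_node: Node | None = None

def build_sorted(values) -> Node | None:
    head: Node | None = None
    for value in sorted(values, reverse=True):
        head = Node(value, head)  # prepend, so the final order is ascending
    return head

def to_list(head: Node | None) -> list[int]:
    out = []
    while head:
        out.append(head.data)
        head = head.next_node
    return out

odd = build_sorted((3, 9, -11))
even = build_sorted((4, 6, 2))
merged = build_sorted(to_list(odd) + to_list(even))
print(" -> ".join(str(v) for v in to_list(merged)))  # -11 -> 2 -> 3 -> 4 -> 6 -> 9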
1
'''simple docstring''' def __UpperCAmelCase ( a_: list[int] ): if not nums: # Makes sure that the list is not empty raise ValueError("List is empty" ) _UpperCAmelCase : List[Any] = sum(a_ ) / len(a_ ) # Calculate the average return sum(abs(x - average ) for x in nums ) / len(a_ ) if __name__ == "__main__": import doctest doctest.testmod()
17
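The function above computes the mean absolute deviation; a deobfuscated version with a quick check:

def mean_absolute_deviation(nums: list[float]) -> float:
    if not nums:
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)
    return sum(abs(x - average) for x in nums) / len(nums)

print(mean_absolute_deviation([1, 2, 3, 4]))  # 1.0 (mean 2.5, deviations 1.5, 0.5, 0.5, 1.5)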
'''simple docstring''' def __UpperCAmelCase ( a_: str ): if not all(char in "01" for char in bin_string ): raise ValueError("Non-binary value was passed to the function" ) if not bin_string: raise ValueError("Empty string was passed to the function" ) _UpperCAmelCase : Optional[Any] = "" while len(a_ ) % 3 != 0: _UpperCAmelCase : List[Any] = "0" + bin_string _UpperCAmelCase : Dict = [ bin_string[index : index + 3] for index in range(len(a_ ) ) if index % 3 == 0 ] for bin_group in bin_string_in_3_list: _UpperCAmelCase : Optional[Any] = 0 for index, val in enumerate(a_ ): oct_val += int(2 ** (2 - index) * int(a_ ) ) oct_string += str(a_ ) return oct_string if __name__ == "__main__": from doctest import testmod testmod()
17
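A readable equivalent of the binary-to-octal conversion above: left-pad to a multiple of three bits, then map each 3-bit group to one octal digit.

def bin_to_octal(bin_string: str) -> str:
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    return "".join(
        str(int(bin_string[i : i + 3], 2)) for i in range(0, len(bin_string), 3)
    )

print(bin_to_octal("1111"))  # 17, i.e. oct(0b1111)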
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __a = logging.get_logger(__name__) __a = { 'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/config.json', 'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/config.json' # See all FNet models at https://huggingface.co/models?filter=fnet } class A__ ( UpperCamelCase ): """simple docstring""" UpperCamelCase_ : Optional[Any] = '''fnet''' def __init__( self : str , lowerCAmelCase__ : Dict=3_2_0_0_0 , lowerCAmelCase__ : Dict=7_6_8 , lowerCAmelCase__ : List[str]=1_2 , lowerCAmelCase__ : Any=3_0_7_2 , lowerCAmelCase__ : Union[str, Any]="gelu_new" , lowerCAmelCase__ : Optional[Any]=0.1 , lowerCAmelCase__ : int=5_1_2 , lowerCAmelCase__ : str=4 , lowerCAmelCase__ : Optional[Any]=0.02 , lowerCAmelCase__ : str=1e-12 , lowerCAmelCase__ : Tuple=False , lowerCAmelCase__ : Any=5_1_2 , lowerCAmelCase__ : List[Any]=3 , lowerCAmelCase__ : List[Any]=1 , lowerCAmelCase__ : Tuple=2 , **lowerCAmelCase__ : Any , ) -> Dict: """simple docstring""" super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ ) _UpperCAmelCase : str = vocab_size _UpperCAmelCase : Any = max_position_embeddings _UpperCAmelCase : Tuple = hidden_size _UpperCAmelCase : Dict = num_hidden_layers _UpperCAmelCase : Tuple = intermediate_size _UpperCAmelCase : Optional[int] = hidden_act _UpperCAmelCase : Dict = hidden_dropout_prob _UpperCAmelCase : Dict = initializer_range _UpperCAmelCase : Optional[Any] = type_vocab_size _UpperCAmelCase : List[Any] = layer_norm_eps _UpperCAmelCase : Any = use_tpu_fourier_optimizations _UpperCAmelCase : Tuple = tpu_short_seq_length
17
'''simple docstring''' from datetime import datetime import matplotlib.pyplot as plt import torch def __UpperCAmelCase ( a_: str ): for param in module.parameters(): _UpperCAmelCase : Any = False def __UpperCAmelCase ( ): _UpperCAmelCase : Union[str, Any] = "cuda" if torch.cuda.is_available() else "cpu" if torch.backends.mps.is_available() and torch.backends.mps.is_built(): _UpperCAmelCase : int = "mps" if device == "mps": print( "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch" " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues" " with generations." ) return device def __UpperCAmelCase ( a_: Optional[Any] ): _UpperCAmelCase : int = plt.imshow(a_ ) fig.axes.get_xaxis().set_visible(a_ ) fig.axes.get_yaxis().set_visible(a_ ) plt.show() def __UpperCAmelCase ( ): _UpperCAmelCase : Dict = datetime.now() _UpperCAmelCase : List[str] = current_time.strftime("%H:%M:%S" ) return timestamp
17
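The device-selection helper in the row above resolves CUDA first and then lets MPS take precedence when it reports available; a readable sketch of that logic:

import torch

def pick_device() -> str:
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"  # the original also prints a warning about MPS backprop issues
    return device

print(pick_device())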
1
'''simple docstring''' import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class A__ ( UpperCamelCase ): """simple docstring""" def _lowerCAmelCase ( self : Dict ) -> str: """simple docstring""" _UpperCAmelCase : Tuple = tempfile.mkdtemp() _UpperCAmelCase : Optional[Any] = 8 # DPR tok _UpperCAmelCase : int = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] _UpperCAmelCase : str = os.path.join(self.tmpdirname , "dpr_tokenizer" ) os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ ) _UpperCAmelCase : Dict = os.path.join(lowerCAmelCase__ , DPR_VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) # BART tok _UpperCAmelCase : str = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] _UpperCAmelCase : int = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) ) _UpperCAmelCase : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] _UpperCAmelCase : Optional[int] = {"unk_token": "<unk>"} _UpperCAmelCase : List[Any] = os.path.join(self.tmpdirname , "bart_tokenizer" ) os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ ) _UpperCAmelCase : Optional[Any] = os.path.join(lowerCAmelCase__ , BART_VOCAB_FILES_NAMES["vocab_file"] ) _UpperCAmelCase : Any = os.path.join(lowerCAmelCase__ , BART_VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(lowerCAmelCase__ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(lowerCAmelCase__ ) ) def _lowerCAmelCase ( self : Tuple ) -> DPRQuestionEncoderTokenizer: """simple docstring""" return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) ) def _lowerCAmelCase ( self : Tuple ) -> DPRContextEncoderTokenizer: """simple docstring""" return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) ) def _lowerCAmelCase ( self : str ) -> BartTokenizer: """simple docstring""" return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) ) def _lowerCAmelCase ( self : int ) -> Tuple: """simple docstring""" shutil.rmtree(self.tmpdirname ) def _lowerCAmelCase ( self : str ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase : Union[str, Any] = 
Dataset.from_dict( { "id": ["0", "1"], "text": ["foo", "bar"], "title": ["Foo", "Bar"], "embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )], } ) dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT ) return dataset def _lowerCAmelCase ( self : List[Any] ) -> Any: """simple docstring""" _UpperCAmelCase : List[Any] = self.get_dummy_dataset() _UpperCAmelCase : List[Any] = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , ) with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset: _UpperCAmelCase : str = dataset _UpperCAmelCase : Optional[int] = RagRetriever( lowerCAmelCase__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) return retriever def _lowerCAmelCase ( self : Any , lowerCAmelCase__ : bool ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase : Optional[Any] = self.get_dummy_dataset() _UpperCAmelCase : int = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , ) if from_disk: _UpperCAmelCase : List[str] = os.path.join(self.tmpdirname , "dataset" ) _UpperCAmelCase : Tuple = os.path.join(self.tmpdirname , "index.faiss" ) dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname , "index.faiss" ) ) dataset.drop_index("embeddings" ) dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset" ) ) del dataset _UpperCAmelCase : Optional[Any] = RagRetriever( lowerCAmelCase__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) else: _UpperCAmelCase : Tuple = RagRetriever( lowerCAmelCase__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , lowerCAmelCase__ ) , ) return retriever def _lowerCAmelCase ( self : str ) -> Any: """simple docstring""" _UpperCAmelCase : Any = Dataset.from_dict( { "id": ["0", "1"], "text": ["foo", "bar"], "title": ["Foo", "Bar"], "embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )], } ) dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT ) _UpperCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index" ) dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr" ) pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb" ) ) _UpperCAmelCase : List[str] = os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl" ) _UpperCAmelCase : Optional[Any] = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset} pickle.dump(lowerCAmelCase__ , open(lowerCAmelCase__ , "wb" ) ) _UpperCAmelCase : Optional[int] = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , ) _UpperCAmelCase : List[str] = RagRetriever( lowerCAmelCase__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() ) return retriever def _lowerCAmelCase ( self : Tuple ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase : List[str] = 1 _UpperCAmelCase : List[Any] = 
self.get_dummy_canonical_hf_index_retriever() _UpperCAmelCase : int = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[str] = retriever.retrieve(lowerCAmelCase__ , n_docs=lowerCAmelCase__ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(lowerCAmelCase__ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] ) self.assertEqual(len(doc_dicts[0]["id"] ) , lowerCAmelCase__ ) self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _lowerCAmelCase ( self : Optional[int] ) -> Any: """simple docstring""" _UpperCAmelCase : Tuple = self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset: _UpperCAmelCase : List[Any] = self.get_dummy_dataset() retriever.save_pretrained(lowerCAmelCase__ ) _UpperCAmelCase : str = RagRetriever.from_pretrained(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : Union[str, Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) _UpperCAmelCase : Optional[int] = retriever.retrieve(lowerCAmelCase__ , n_docs=1 ) self.assertTrue(out is not None ) def _lowerCAmelCase ( self : List[str] ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase : Optional[Any] = 1 _UpperCAmelCase : Union[str, Any] = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCAmelCase__ ) _UpperCAmelCase : Tuple = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[str] = retriever.retrieve(lowerCAmelCase__ , n_docs=lowerCAmelCase__ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(lowerCAmelCase__ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] ) self.assertEqual(len(doc_dicts[0]["id"] ) , lowerCAmelCase__ ) self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _lowerCAmelCase ( self : List[str] ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase : Tuple = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCAmelCase__ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(lowerCAmelCase__ ) _UpperCAmelCase : str = RagRetriever.from_pretrained(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : Optional[Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) _UpperCAmelCase : Optional[int] = retriever.retrieve(lowerCAmelCase__ , n_docs=1 ) self.assertTrue(out is not None ) def _lowerCAmelCase ( self : Dict ) -> List[str]: """simple docstring""" _UpperCAmelCase : Dict = 1 _UpperCAmelCase : int = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCAmelCase__ ) _UpperCAmelCase : str = np.array( [np.ones(self.retrieval_vector_size ), 
-np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = retriever.retrieve(lowerCAmelCase__ , n_docs=lowerCAmelCase__ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(lowerCAmelCase__ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] ) self.assertEqual(len(doc_dicts[0]["id"] ) , lowerCAmelCase__ ) self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _lowerCAmelCase ( self : Tuple ) -> Any: """simple docstring""" _UpperCAmelCase : str = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCAmelCase__ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(lowerCAmelCase__ ) _UpperCAmelCase : Any = RagRetriever.from_pretrained(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : Optional[Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) _UpperCAmelCase : Optional[int] = retriever.retrieve(lowerCAmelCase__ , n_docs=1 ) self.assertTrue(out is not None ) def _lowerCAmelCase ( self : Any ) -> Any: """simple docstring""" _UpperCAmelCase : Tuple = 1 _UpperCAmelCase : Dict = self.get_dummy_legacy_index_retriever() _UpperCAmelCase : List[str] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = retriever.retrieve(lowerCAmelCase__ , n_docs=lowerCAmelCase__ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(lowerCAmelCase__ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["text", "title"] ) self.assertEqual(len(doc_dicts[0]["text"] ) , lowerCAmelCase__ ) self.assertEqual(doc_dicts[0]["text"][0] , "bar" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["text"][0] , "foo" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _lowerCAmelCase ( self : List[str] ) -> List[str]: """simple docstring""" _UpperCAmelCase : Tuple = self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(lowerCAmelCase__ ) _UpperCAmelCase : Any = RagRetriever.from_pretrained(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : Tuple = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) _UpperCAmelCase : Any = retriever.retrieve(lowerCAmelCase__ , n_docs=1 ) self.assertTrue(out is not None ) @require_torch @require_tokenizers @require_sentencepiece def _lowerCAmelCase ( self : Any ) -> str: """simple docstring""" import torch _UpperCAmelCase : Any = 1 _UpperCAmelCase : Optional[int] = self.get_dummy_canonical_hf_index_retriever() _UpperCAmelCase : Dict = [[5, 7], [1_0, 1_1]] _UpperCAmelCase : Tuple = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) _UpperCAmelCase : Dict = retriever(lowerCAmelCase__ , lowerCAmelCase__ , prefix=retriever.config.generator.prefix , n_docs=lowerCAmelCase__ ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[int] = ( 
out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , np.ndarray ) _UpperCAmelCase : Tuple = retriever( lowerCAmelCase__ , lowerCAmelCase__ , prefix=retriever.config.generator.prefix , n_docs=lowerCAmelCase__ , return_tensors="pt" , ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[Any] = ( # noqa: F841 out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], out["doc_ids"], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(lowerCAmelCase__ , torch.Tensor ) self.assertIsInstance(lowerCAmelCase__ , torch.Tensor ) self.assertIsInstance(lowerCAmelCase__ , torch.Tensor ) @require_torch @require_tokenizers @require_sentencepiece def _lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase : str = self.get_dpr_ctx_encoder_tokenizer() _UpperCAmelCase : Dict = 1 _UpperCAmelCase : Dict = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCAmelCase__ ) retriever.set_ctx_encoder_tokenizer(lowerCAmelCase__ ) _UpperCAmelCase : List[Any] = [[5, 7], [1_0, 1_1]] _UpperCAmelCase : Union[str, Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) _UpperCAmelCase : Tuple = retriever(lowerCAmelCase__ , lowerCAmelCase__ , prefix=retriever.config.generator.prefix , n_docs=lowerCAmelCase__ ) self.assertEqual( len(lowerCAmelCase__ ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask") ) , lowerCAmelCase__ ) # check for doc token related keys in dictionary.
17
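The retriever tests above assert `doc_ids == [[1], [0]]` for all-ones and all-minus-ones queries against all-ones and all-twos document embeddings. The underlying maximum-inner-product search can be reproduced with FAISS directly; a sketch using `IndexFlatIP` (exact inner-product search, which is what a "Flat" index with `METRIC_INNER_PRODUCT` provides):

import faiss
import numpy as np

d = 8  # retrieval_vector_size used by the tests
docs = np.stack([np.ones(d), 2 * np.ones(d)]).astype("float32")
index = faiss.IndexFlatIP(d)
index.add(docs)

queries = np.stack([np.ones(d), -np.ones(d)]).astype("float32")
scores, doc_ids = index.search(queries, 1)
print(doc_ids.tolist())  # [[1], [0]] -- the doc with the larger inner product wins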
'''simple docstring''' import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class A__ ( UpperCamelCase ): """simple docstring""" UpperCamelCase_ : Optional[int] = (EulerDiscreteScheduler,) UpperCamelCase_ : Tuple = 10 def _lowerCAmelCase ( self : Dict , **lowerCAmelCase__ : Tuple ) -> Any: """simple docstring""" _UpperCAmelCase : str = { "num_train_timesteps": 1_1_0_0, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**lowerCAmelCase__ ) return config def _lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=lowerCAmelCase__ ) def _lowerCAmelCase ( self : Any ) -> List[str]: """simple docstring""" for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=lowerCAmelCase__ , beta_end=lowerCAmelCase__ ) def _lowerCAmelCase ( self : List[str] ) -> List[str]: """simple docstring""" for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=lowerCAmelCase__ ) def _lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowerCAmelCase__ ) def _lowerCAmelCase ( self : List[Any] ) -> List[Any]: """simple docstring""" _UpperCAmelCase : List[str] = self.scheduler_classes[0] _UpperCAmelCase : int = self.get_scheduler_config() _UpperCAmelCase : Optional[int] = scheduler_class(**lowerCAmelCase__ ) scheduler.set_timesteps(self.num_inference_steps ) _UpperCAmelCase : int = torch.manual_seed(0 ) _UpperCAmelCase : Any = self.dummy_model() _UpperCAmelCase : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma _UpperCAmelCase : List[Any] = sample.to(lowerCAmelCase__ ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase : List[str] = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : int = model(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : int = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ ) _UpperCAmelCase : Optional[int] = output.prev_sample _UpperCAmelCase : Optional[Any] = torch.sum(torch.abs(lowerCAmelCase__ ) ) _UpperCAmelCase : Tuple = torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_sum.item() - 10.0807 ) < 1e-2 assert abs(result_mean.item() - 0.0131 ) < 1e-3 def _lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" _UpperCAmelCase : Any = self.scheduler_classes[0] _UpperCAmelCase : List[Any] = self.get_scheduler_config(prediction_type="v_prediction" ) _UpperCAmelCase : Any = scheduler_class(**lowerCAmelCase__ ) scheduler.set_timesteps(self.num_inference_steps ) _UpperCAmelCase : str = torch.manual_seed(0 ) _UpperCAmelCase : Optional[Any] = self.dummy_model() _UpperCAmelCase : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma _UpperCAmelCase : Tuple = sample.to(lowerCAmelCase__ ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase : Union[str, Any] = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : int = model(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : Union[str, Any] = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ ) _UpperCAmelCase : Union[str, Any] = output.prev_sample 
_UpperCAmelCase : Tuple = torch.sum(torch.abs(lowerCAmelCase__ ) ) _UpperCAmelCase : Any = torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_sum.item() - 0.0002 ) < 1e-2 assert abs(result_mean.item() - 2.26_76e-06 ) < 1e-3 def _lowerCAmelCase ( self : Tuple ) -> str: """simple docstring""" _UpperCAmelCase : Optional[int] = self.scheduler_classes[0] _UpperCAmelCase : List[Any] = self.get_scheduler_config() _UpperCAmelCase : int = scheduler_class(**lowerCAmelCase__ ) scheduler.set_timesteps(self.num_inference_steps , device=lowerCAmelCase__ ) _UpperCAmelCase : Optional[int] = torch.manual_seed(0 ) _UpperCAmelCase : str = self.dummy_model() _UpperCAmelCase : Any = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() _UpperCAmelCase : str = sample.to(lowerCAmelCase__ ) for t in scheduler.timesteps: _UpperCAmelCase : List[str] = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : Any = model(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : Tuple = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ ) _UpperCAmelCase : int = output.prev_sample _UpperCAmelCase : List[Any] = torch.sum(torch.abs(lowerCAmelCase__ ) ) _UpperCAmelCase : str = torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_sum.item() - 10.0807 ) < 1e-2 assert abs(result_mean.item() - 0.0131 ) < 1e-3 def _lowerCAmelCase ( self : List[str] ) -> int: """simple docstring""" _UpperCAmelCase : List[Any] = self.scheduler_classes[0] _UpperCAmelCase : int = self.get_scheduler_config() _UpperCAmelCase : Union[str, Any] = scheduler_class(**lowerCAmelCase__ , use_karras_sigmas=lowerCAmelCase__ ) scheduler.set_timesteps(self.num_inference_steps , device=lowerCAmelCase__ ) _UpperCAmelCase : Optional[int] = torch.manual_seed(0 ) _UpperCAmelCase : List[str] = self.dummy_model() _UpperCAmelCase : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() _UpperCAmelCase : Optional[int] = sample.to(lowerCAmelCase__ ) for t in scheduler.timesteps: _UpperCAmelCase : List[Any] = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : str = model(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : Optional[Any] = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ ) _UpperCAmelCase : List[Any] = output.prev_sample _UpperCAmelCase : List[Any] = torch.sum(torch.abs(lowerCAmelCase__ ) ) _UpperCAmelCase : Optional[Any] = torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_sum.item() - 124.52_2994_9951_1719 ) < 1e-2 assert abs(result_mean.item() - 0.1_6213_9326_3339_9963 ) < 1e-3
17
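The scheduler tests above exercise Euler discrete sampling, whose core update is a single explicit-Euler step of the probability-flow ODE. A purely illustrative toy with a fake "model" (under the epsilon-prediction parameterization, the ODE derivative equals the predicted noise); this is not the diffusers implementation:

import numpy as np

def euler_step(sample, model_output, sigma, sigma_next):
    derivative = model_output  # epsilon-prediction: d(sample)/d(sigma) = predicted noise
    return sample + derivative * (sigma_next - sigma)

sigmas = np.linspace(10.0, 0.0, 11)  # toy noise schedule, high to low
x = np.random.randn(4) * sigmas[0]   # start from pure noise
for sigma, sigma_next in zip(sigmas[:-1], sigmas[1:]):
    eps_hat = x / sigma               # stand-in for a trained denoiser
    x = euler_step(x, eps_hat, sigma, sigma_next)
print(x)  # driven to (near) zero as sigma -> 0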
1
'''simple docstring''' from ... import PretrainedConfig __a = { 'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json', } class A__ ( UpperCamelCase ): """simple docstring""" UpperCamelCase_ : Dict = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP UpperCamelCase_ : Optional[Any] = '''nezha''' def __init__( self : Any , lowerCAmelCase__ : Optional[int]=2_1_1_2_8 , lowerCAmelCase__ : List[Any]=7_6_8 , lowerCAmelCase__ : int=1_2 , lowerCAmelCase__ : str=1_2 , lowerCAmelCase__ : int=3_0_7_2 , lowerCAmelCase__ : List[Any]="gelu" , lowerCAmelCase__ : str=0.1 , lowerCAmelCase__ : Optional[Any]=0.1 , lowerCAmelCase__ : Optional[int]=5_1_2 , lowerCAmelCase__ : Dict=6_4 , lowerCAmelCase__ : Optional[Any]=2 , lowerCAmelCase__ : Tuple=0.02 , lowerCAmelCase__ : Any=1e-12 , lowerCAmelCase__ : str=0.1 , lowerCAmelCase__ : Union[str, Any]=0 , lowerCAmelCase__ : Optional[int]=2 , lowerCAmelCase__ : Optional[int]=3 , lowerCAmelCase__ : Optional[int]=True , **lowerCAmelCase__ : List[Any] , ) -> List[Any]: """simple docstring""" super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ ) _UpperCAmelCase : int = vocab_size _UpperCAmelCase : Optional[int] = hidden_size _UpperCAmelCase : Optional[Any] = num_hidden_layers _UpperCAmelCase : int = num_attention_heads _UpperCAmelCase : Dict = hidden_act _UpperCAmelCase : Optional[Any] = intermediate_size _UpperCAmelCase : Optional[Any] = hidden_dropout_prob _UpperCAmelCase : Optional[int] = attention_probs_dropout_prob _UpperCAmelCase : Optional[Any] = max_position_embeddings _UpperCAmelCase : Tuple = max_relative_position _UpperCAmelCase : Optional[Any] = type_vocab_size _UpperCAmelCase : int = initializer_range _UpperCAmelCase : List[Any] = layer_norm_eps _UpperCAmelCase : int = classifier_dropout _UpperCAmelCase : Any = use_cache
17
'''simple docstring''' def __UpperCAmelCase ( a_: int, a_: int ): if a < 0 or b < 0: raise ValueError("the value of both inputs must be positive" ) _UpperCAmelCase : List[str] = str(bin(a_ ) )[2:] # remove the leading "0b" _UpperCAmelCase : Any = str(bin(a_ ) )[2:] # remove the leading "0b" _UpperCAmelCase : Dict = max(len(a_ ), len(a_ ) ) return "0b" + "".join( str(int(char_a == "1" and char_b == "1" ) ) for char_a, char_b in zip(a_binary.zfill(a_ ), b_binary.zfill(a_ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
17
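The bitwise-AND helper above builds the answer character by character; the same result (including the zero-padded "0b" formatting) falls out of Python's `&` operator, as in this sketch:

def binary_and(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    width = max(a.bit_length(), b.bit_length(), 1)
    return "0b" + format(a & b, f"0{width}b")

print(binary_and(25, 32))  # 0b000000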
1
'''simple docstring''' import inspect import unittest from transformers import SegformerConfig, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_MAPPING, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerModel, ) from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import SegformerImageProcessor class A__ ( UpperCamelCase ): """simple docstring""" def _lowerCAmelCase ( self : int ) -> Any: """simple docstring""" _UpperCAmelCase : Dict = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(lowerCAmelCase__ , "hidden_sizes" ) ) self.parent.assertTrue(hasattr(lowerCAmelCase__ , "num_attention_heads" ) ) self.parent.assertTrue(hasattr(lowerCAmelCase__ , "num_encoder_blocks" ) ) class A__ : """simple docstring""" def __init__( self : Optional[int] , lowerCAmelCase__ : int , lowerCAmelCase__ : str=1_3 , lowerCAmelCase__ : int=6_4 , lowerCAmelCase__ : Dict=3 , lowerCAmelCase__ : List[str]=4 , lowerCAmelCase__ : List[Any]=[2, 2, 2, 2] , lowerCAmelCase__ : int=[8, 4, 2, 1] , lowerCAmelCase__ : Tuple=[1_6, 3_2, 6_4, 1_2_8] , lowerCAmelCase__ : Union[str, Any]=[1, 4, 8, 1_6] , lowerCAmelCase__ : Union[str, Any]=[1, 2, 4, 8] , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : List[Any]=True , lowerCAmelCase__ : Optional[int]="gelu" , lowerCAmelCase__ : int=0.1 , lowerCAmelCase__ : Tuple=0.1 , lowerCAmelCase__ : Dict=0.02 , lowerCAmelCase__ : Optional[int]=3 , lowerCAmelCase__ : int=None , ) -> List[Any]: """simple docstring""" _UpperCAmelCase : Optional[int] = parent _UpperCAmelCase : Optional[Any] = batch_size _UpperCAmelCase : str = image_size _UpperCAmelCase : Dict = num_channels _UpperCAmelCase : Any = num_encoder_blocks _UpperCAmelCase : int = sr_ratios _UpperCAmelCase : Dict = depths _UpperCAmelCase : int = hidden_sizes _UpperCAmelCase : List[Any] = downsampling_rates _UpperCAmelCase : int = num_attention_heads _UpperCAmelCase : Dict = is_training _UpperCAmelCase : List[Any] = use_labels _UpperCAmelCase : int = hidden_act _UpperCAmelCase : List[Any] = hidden_dropout_prob _UpperCAmelCase : Dict = attention_probs_dropout_prob _UpperCAmelCase : Union[str, Any] = initializer_range _UpperCAmelCase : Union[str, Any] = num_labels _UpperCAmelCase : Optional[Any] = scope def _lowerCAmelCase ( self : Dict ) -> Dict: """simple docstring""" _UpperCAmelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCAmelCase : str = None if self.use_labels: _UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) _UpperCAmelCase : List[str] = self.get_config() return config, pixel_values, labels def _lowerCAmelCase ( self : str ) -> Union[str, Any]: """simple docstring""" return SegformerConfig( image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def _lowerCAmelCase ( self : str , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Dict ) -> List[str]: """simple docstring""" _UpperCAmelCase : Optional[Any] = SegformerModel(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() _UpperCAmelCase : Dict = model(lowerCAmelCase__ ) _UpperCAmelCase : Optional[Any] = self.image_size // (self.downsampling_rates[-1] * 2) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) ) def _lowerCAmelCase ( self : List[str] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Tuple ) -> str: """simple docstring""" _UpperCAmelCase : Dict = self.num_labels _UpperCAmelCase : Optional[int] = SegformerForSemanticSegmentation(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() _UpperCAmelCase : Tuple = model(lowerCAmelCase__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) _UpperCAmelCase : Tuple = model(lowerCAmelCase__ , labels=lowerCAmelCase__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) self.parent.assertGreater(result.loss , 0.0 ) def _lowerCAmelCase ( self : List[str] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : int ) -> List[Any]: """simple docstring""" _UpperCAmelCase : str = 1 _UpperCAmelCase : Dict = SegformerForSemanticSegmentation(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() _UpperCAmelCase : Tuple = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(lowerCAmelCase__ ) _UpperCAmelCase : Optional[Any] = model(lowerCAmelCase__ , labels=lowerCAmelCase__ ) self.parent.assertGreater(result.loss , 0.0 ) def _lowerCAmelCase ( self : Optional[int] ) -> int: """simple docstring""" _UpperCAmelCase : List[Any] = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any = config_and_inputs _UpperCAmelCase : Tuple = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class A__ ( UpperCamelCase , UpperCamelCase , unittest.TestCase ): """simple docstring""" UpperCamelCase_ : Union[str, Any] = ( ( SegformerModel, SegformerForSemanticSegmentation, SegformerForImageClassification, ) if is_torch_available() else () ) UpperCamelCase_ : List[Any] = ( { '''feature-extraction''': SegformerModel, '''image-classification''': SegformerForImageClassification, '''image-segmentation''': SegformerForSemanticSegmentation, } if is_torch_available() else {} ) UpperCamelCase_ : Union[str, Any] = True UpperCamelCase_ : str = False UpperCamelCase_ : List[str] = False UpperCamelCase_ : int = False def _lowerCAmelCase ( self : Tuple ) -> Dict: """simple docstring""" _UpperCAmelCase : Optional[int] = SegformerModelTester(self ) _UpperCAmelCase : int = SegformerConfigTester(self , config_class=lowerCAmelCase__ ) def _lowerCAmelCase ( self : Optional[int] ) -> List[str]: """simple docstring""" self.config_tester.run_common_tests() def _lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" _UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase__ ) def _lowerCAmelCase ( self : List[str] ) -> Dict: """simple docstring""" 
_UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_binary_image_segmentation(*lowerCAmelCase__ ) def _lowerCAmelCase ( self : List[Any] ) -> str: """simple docstring""" _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_segmentation(*lowerCAmelCase__ ) @unittest.skip("SegFormer does not use inputs_embeds" ) def _lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" pass @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods" ) def _lowerCAmelCase ( self : int ) -> str: """simple docstring""" pass def _lowerCAmelCase ( self : int ) -> str: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase : int = model_class(lowerCAmelCase__ ) _UpperCAmelCase : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase : List[str] = [*signature.parameters.keys()] _UpperCAmelCase : List[str] = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowerCAmelCase__ ) def _lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase : Optional[Any] = True for model_class in self.all_model_classes: _UpperCAmelCase : str = True _UpperCAmelCase : Any = False _UpperCAmelCase : List[str] = True _UpperCAmelCase : Dict = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() with torch.no_grad(): _UpperCAmelCase : List[str] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) _UpperCAmelCase : Tuple = outputs.attentions _UpperCAmelCase : str = sum(self.model_tester.depths ) self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ ) # check that output_attentions also work using config del inputs_dict["output_attentions"] _UpperCAmelCase : int = True _UpperCAmelCase : Optional[int] = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() with torch.no_grad(): _UpperCAmelCase : Tuple = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) _UpperCAmelCase : str = outputs.attentions self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ ) # verify the first attentions (first block, first layer) _UpperCAmelCase : List[Any] = (self.model_tester.image_size // 4) ** 2 _UpperCAmelCase : Union[str, Any] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , ) # verify the last attentions (last block, last layer) _UpperCAmelCase : Any = (self.model_tester.image_size // 3_2) ** 2 _UpperCAmelCase : Union[str, Any] = (self.model_tester.image_size // (3_2 * self.model_tester.sr_ratios[-1])) ** 2 self.assertListEqual( list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , ) _UpperCAmelCase : Dict = len(lowerCAmelCase__ ) # Check attention is always last and order is fine _UpperCAmelCase : Optional[Any] = True _UpperCAmelCase : Optional[int] = True _UpperCAmelCase : str = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() with torch.no_grad(): 
_UpperCAmelCase : Tuple = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) self.assertEqual(out_len + 1 , len(lowerCAmelCase__ ) ) _UpperCAmelCase : Dict = outputs.attentions self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ ) # verify the first attentions (first block, first layer) _UpperCAmelCase : Dict = (self.model_tester.image_size // 4) ** 2 _UpperCAmelCase : List[str] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , ) def _lowerCAmelCase ( self : List[str] ) -> Optional[Any]: """simple docstring""" def check_hidden_states_output(lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Any ): _UpperCAmelCase : List[str] = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() with torch.no_grad(): _UpperCAmelCase : Tuple = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) _UpperCAmelCase : str = outputs.hidden_states _UpperCAmelCase : int = self.model_tester.num_encoder_blocks self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.hidden_sizes[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) _UpperCAmelCase , _UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase : Dict = True check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase : Optional[Any] = True check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) def _lowerCAmelCase ( self : Optional[int] ) -> int: """simple docstring""" if not self.model_tester.is_training: return _UpperCAmelCase , _UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase : Optional[Any] = True for model_class in self.all_model_classes: if model_class in get_values(lowerCAmelCase__ ): continue _UpperCAmelCase : Optional[int] = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.train() _UpperCAmelCase : Dict = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ ) _UpperCAmelCase : Tuple = model(**lowerCAmelCase__ ).loss loss.backward() @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." 
) def _lowerCAmelCase ( self : Optional[Any] ) -> Tuple: """simple docstring""" pass @slow def _lowerCAmelCase ( self : int ) -> Union[str, Any]: """simple docstring""" for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase : str = SegformerModel.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) def __UpperCAmelCase ( ): _UpperCAmelCase : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch class A__ ( unittest.TestCase ): """simple docstring""" @slow def _lowerCAmelCase ( self : Dict ) -> List[Any]: """simple docstring""" _UpperCAmelCase : Optional[Any] = SegformerImageProcessor( image_scale=(5_1_2, 5_1_2) , keep_ratio=lowerCAmelCase__ , align=lowerCAmelCase__ , do_random_crop=lowerCAmelCase__ ) _UpperCAmelCase : Union[str, Any] = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to( lowerCAmelCase__ ) _UpperCAmelCase : Optional[int] = prepare_img() _UpperCAmelCase : List[Any] = image_processor(images=lowerCAmelCase__ , return_tensors="pt" ) _UpperCAmelCase : List[str] = encoded_inputs.pixel_values.to(lowerCAmelCase__ ) with torch.no_grad(): _UpperCAmelCase : Dict = model(lowerCAmelCase__ ) _UpperCAmelCase : Union[str, Any] = torch.Size((1, model.config.num_labels, 1_2_8, 1_2_8) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase__ ) _UpperCAmelCase : Tuple = torch.tensor( [ [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]], [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]], [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]], ] ).to(lowerCAmelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , lowerCAmelCase__ , atol=1e-4 ) ) @slow def _lowerCAmelCase ( self : Union[str, Any] ) -> Tuple: """simple docstring""" _UpperCAmelCase : Optional[Any] = SegformerImageProcessor( image_scale=(5_1_2, 5_1_2) , keep_ratio=lowerCAmelCase__ , align=lowerCAmelCase__ , do_random_crop=lowerCAmelCase__ ) _UpperCAmelCase : int = SegformerForSemanticSegmentation.from_pretrained( "nvidia/segformer-b1-finetuned-cityscapes-1024-1024" ).to(lowerCAmelCase__ ) _UpperCAmelCase : Dict = prepare_img() _UpperCAmelCase : Optional[Any] = image_processor(images=lowerCAmelCase__ , return_tensors="pt" ) _UpperCAmelCase : List[str] = encoded_inputs.pixel_values.to(lowerCAmelCase__ ) with torch.no_grad(): _UpperCAmelCase : int = model(lowerCAmelCase__ ) _UpperCAmelCase : Dict = torch.Size((1, model.config.num_labels, 1_2_8, 1_2_8) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase__ ) _UpperCAmelCase : Union[str, Any] = torch.tensor( [ [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]], [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]], [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]], ] ).to(lowerCAmelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , lowerCAmelCase__ , atol=1e-1 ) ) @slow def _lowerCAmelCase ( self : List[Any] ) -> Any: """simple docstring""" _UpperCAmelCase : Optional[int] = SegformerImageProcessor( image_scale=(5_1_2, 5_1_2) , keep_ratio=lowerCAmelCase__ , align=lowerCAmelCase__ , do_random_crop=lowerCAmelCase__ ) _UpperCAmelCase : Any = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to( lowerCAmelCase__ ) 
_UpperCAmelCase : List[str] = prepare_img() _UpperCAmelCase : Union[str, Any] = image_processor(images=lowerCAmelCase__ , return_tensors="pt" ) _UpperCAmelCase : Any = encoded_inputs.pixel_values.to(lowerCAmelCase__ ) with torch.no_grad(): _UpperCAmelCase : Tuple = model(lowerCAmelCase__ ) _UpperCAmelCase : Any = outputs.logits.detach().cpu() _UpperCAmelCase : Any = image_processor.post_process_semantic_segmentation(outputs=lowerCAmelCase__ , target_sizes=[(5_0_0, 3_0_0)] ) _UpperCAmelCase : Dict = torch.Size((5_0_0, 3_0_0) ) self.assertEqual(segmentation[0].shape , lowerCAmelCase__ ) _UpperCAmelCase : int = image_processor.post_process_semantic_segmentation(outputs=lowerCAmelCase__ ) _UpperCAmelCase : Any = torch.Size((1_2_8, 1_2_8) ) self.assertEqual(segmentation[0].shape , lowerCAmelCase__ )
17
'''simple docstring''' from collections.abc import Callable from math import pi, sqrt from random import uniform from statistics import mean def __UpperCAmelCase ( a_: int ): # A local function to see if a dot lands in the circle. def is_in_circle(a_: float, a_: float ) -> bool: _UpperCAmelCase : Optional[Any] = sqrt((x**2) + (y**2) ) # Our circle has a radius of 1, so a distance # greater than 1 would land outside the circle. return distance_from_centre <= 1 # The proportion of guesses that landed in the circle _UpperCAmelCase : str = mean( int(is_in_circle(uniform(-1.0, 1.0 ), uniform(-1.0, 1.0 ) ) ) for _ in range(a_ ) ) # The ratio of the circle's area to the square's area is pi/4. _UpperCAmelCase : Optional[int] = proportion * 4 print(f"""The estimated value of pi is {pi_estimate}""" ) print(f"""The math module's value of pi is {pi}""" ) print(f"""The total error is {abs(pi - pi_estimate )}""" ) def __UpperCAmelCase ( a_: int, a_: Callable[[float], float], a_: float = 0.0, a_: float = 1.0, ): return mean( function_to_integrate(uniform(a_, a_ ) ) for _ in range(a_ ) ) * (max_value - min_value) def __UpperCAmelCase ( a_: int, a_: float = 0.0, a_: float = 1.0 ): def identity_function(a_: float ) -> float: return x _UpperCAmelCase : Union[str, Any] = area_under_curve_estimator( a_, a_, a_, a_ ) _UpperCAmelCase : List[str] = (max_value * max_value - min_value * min_value) / 2 print("******************" ) print(f"""Estimating area under y=x where x varies from {min_value} to {max_value}""" ) print(f"""Estimated value is {estimated_value}""" ) print(f"""Expected value is {expected_value}""" ) print(f"""Total error is {abs(estimated_value - expected_value )}""" ) print("******************" ) def __UpperCAmelCase ( a_: int ): def function_to_integrate(a_: float ) -> float: return sqrt(4.0 - x * x ) _UpperCAmelCase : List[str] = area_under_curve_estimator( a_, a_, 0.0, 2.0 ) print("******************" ) print("Estimating pi using area_under_curve_estimator" ) print(f"""Estimated value is {estimated_value}""" ) print(f"""Expected value is {pi}""" ) print(f"""Total error is {abs(estimated_value - pi )}""" ) print("******************" ) if __name__ == "__main__": import doctest doctest.testmod()
'''simple docstring''' import os import sys import unittest __a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, 'utils')) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path __a = os.path.join(git_repo_path, 'src', 'transformers') __a = '\n{0} = None\n' __a = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n' __a = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n' class A__ ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : int ) -> List[Any]: """simple docstring""" _UpperCAmelCase : Optional[int] = find_backend(" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")" ) self.assertIsNone(lowerCAmelCase__ ) _UpperCAmelCase : int = find_backend(" if not is_tokenizers_available():" ) self.assertEqual(lowerCAmelCase__ , "tokenizers" ) _UpperCAmelCase : Any = find_backend(" if not is_tensorflow_text_available():" ) self.assertEqual(lowerCAmelCase__ , "tensorflow_text" ) _UpperCAmelCase : int = find_backend(" if not (is_sentencepiece_available() and is_tokenizers_available()):" ) self.assertEqual(lowerCAmelCase__ , "sentencepiece_and_tokenizers" ) _UpperCAmelCase : Optional[Any] = find_backend( " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" ) self.assertEqual(lowerCAmelCase__ , "sentencepiece_and_tensorflow_text" ) _UpperCAmelCase : List[str] = find_backend( " if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):" ) self.assertEqual(lowerCAmelCase__ , "sentencepiece_and_tokenizers_and_vision" ) def _lowerCAmelCase ( self : List[Any] ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase : Optional[int] = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("torch" , lowerCAmelCase__ ) self.assertIn("tensorflow_text" , lowerCAmelCase__ ) self.assertIn("sentencepiece_and_tokenizers" , lowerCAmelCase__ ) # Likewise, we can't assert on the exact content of a key self.assertIn("BertModel" , objects["torch"] ) self.assertIn("TFBertModel" , objects["tf"] ) self.assertIn("FlaxBertModel" , objects["flax"] ) self.assertIn("BertModel" , objects["torch"] ) self.assertIn("TFBertTokenizer" , objects["tensorflow_text"] ) self.assertIn("convert_slow_tokenizer" , objects["sentencepiece_and_tokenizers"] ) def _lowerCAmelCase ( self : Optional[Any] ) -> str: """simple docstring""" _UpperCAmelCase : List[Any] = create_dummy_object("CONSTANT" , "'torch'" ) self.assertEqual(lowerCAmelCase__ , "\nCONSTANT = None\n" ) _UpperCAmelCase : Optional[Any] = create_dummy_object("function" , "'torch'" ) self.assertEqual( lowerCAmelCase__ , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" ) _UpperCAmelCase : Union[str, Any] = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n" _UpperCAmelCase : Union[str, Any] = create_dummy_object("FakeClass" , "'torch'" ) self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ ) def _lowerCAmelCase ( self : List[str] ) -> Dict: """simple docstring""" _UpperCAmelCase : Tuple = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, 
requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n" _UpperCAmelCase : Any = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} ) self.assertEqual(dummy_files["torch"] , lowerCAmelCase__ )
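# A hedged sketch of the pattern the tests above verify: find_backend maps a
# guarded-import line to a backend string, and create_dummy_object emits the
# placeholder that raises at use time. Assumes the repo's utils/ directory is
# on sys.path, as in the test setup above.
from check_dummies import create_dummy_object, find_backend

assert find_backend("    if not is_torch_available():") == "torch"
print(create_dummy_object("FakeClass", "'torch'"))  # class stub that requires torch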
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) __a = { 'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'], 'processing_layoutlmv2': ['LayoutLMv2Processor'], 'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['LayoutLMv2TokenizerFast'] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['LayoutLMv2FeatureExtractor'] __a = ['LayoutLMv2ImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST', 'LayoutLMv2ForQuestionAnswering', 'LayoutLMv2ForSequenceClassification', 'LayoutLMv2ForTokenClassification', 'LayoutLMv2Layer', 'LayoutLMv2Model', 'LayoutLMv2PreTrainedModel', ] if TYPE_CHECKING: from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaLayer, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
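# A hedged sketch of what the lazy pattern above provides in upstream
# transformers, where the _LazyModule instance replaces sys.modules[__name__]:
# importing the package is cheap, and each attribute access materializes only
# the submodule that defines it.
from transformers.models.layoutlmv2 import LayoutLMv2Config  # imports configuration_layoutlmv2 only

config = LayoutLMv2Config()  # the torch-heavy modeling file is still unloaded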
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel from diffusers.utils.testing_utils import ( enable_full_determinism, load_numpy, nightly, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class A__ ( UpperCamelCase , unittest.TestCase ): """simple docstring""" UpperCamelCase_ : int = LDMTextToImagePipeline UpperCamelCase_ : Optional[int] = TEXT_TO_IMAGE_PARAMS - { '''negative_prompt''', '''negative_prompt_embeds''', '''cross_attention_kwargs''', '''prompt_embeds''', } UpperCamelCase_ : List[Any] = PipelineTesterMixin.required_optional_params - { '''num_images_per_prompt''', '''callback''', '''callback_steps''', } UpperCamelCase_ : Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS UpperCamelCase_ : Dict = False def _lowerCAmelCase ( self : Dict ) -> List[str]: """simple docstring""" torch.manual_seed(0 ) _UpperCAmelCase : Optional[int] = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , ) _UpperCAmelCase : List[Any] = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=lowerCAmelCase__ , set_alpha_to_one=lowerCAmelCase__ , ) torch.manual_seed(0 ) _UpperCAmelCase : Optional[int] = AutoencoderKL( block_out_channels=(3_2, 6_4) , in_channels=3 , out_channels=3 , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , latent_channels=4 , ) torch.manual_seed(0 ) _UpperCAmelCase : str = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) _UpperCAmelCase : List[str] = CLIPTextModel(lowerCAmelCase__ ) _UpperCAmelCase : Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) _UpperCAmelCase : Any = { "unet": unet, "scheduler": scheduler, "vqvae": vae, "bert": text_encoder, "tokenizer": tokenizer, } return components def _lowerCAmelCase ( self : Optional[int] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Union[str, Any]=0 ) -> Union[str, Any]: """simple docstring""" if str(lowerCAmelCase__ ).startswith("mps" ): _UpperCAmelCase : Optional[Any] = torch.manual_seed(lowerCAmelCase__ ) else: _UpperCAmelCase : Optional[Any] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ ) _UpperCAmelCase : Optional[int] = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def _lowerCAmelCase ( self : List[Any] ) -> Tuple: """simple docstring""" _UpperCAmelCase : Dict = "cpu" # ensure determinism for the device-dependent torch.Generator _UpperCAmelCase : str = self.get_dummy_components() _UpperCAmelCase : Tuple = LDMTextToImagePipeline(**lowerCAmelCase__ ) pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) _UpperCAmelCase : Optional[Any] = self.get_dummy_inputs(lowerCAmelCase__ ) _UpperCAmelCase : str = 
pipe(**lowerCAmelCase__ ).images _UpperCAmelCase : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 1_6, 1_6, 3) _UpperCAmelCase : List[str] = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 @slow @require_torch_gpu class A__ ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : Tuple ) -> Optional[int]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[Any]=torch.floataa , lowerCAmelCase__ : Tuple=0 ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase : Optional[int] = torch.manual_seed(lowerCAmelCase__ ) _UpperCAmelCase : List[str] = np.random.RandomState(lowerCAmelCase__ ).standard_normal((1, 4, 3_2, 3_2) ) _UpperCAmelCase : Union[str, Any] = torch.from_numpy(lowerCAmelCase__ ).to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ ) _UpperCAmelCase : Any = { "prompt": "A painting of a squirrel eating a burger", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def _lowerCAmelCase ( self : Optional[Any] ) -> List[Any]: """simple docstring""" _UpperCAmelCase : str = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256" ).to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) _UpperCAmelCase : List[str] = self.get_inputs(lowerCAmelCase__ ) _UpperCAmelCase : Tuple = pipe(**lowerCAmelCase__ ).images _UpperCAmelCase : Optional[Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 2_5_6, 2_5_6, 3) _UpperCAmelCase : int = np.array([0.5_1825, 0.5_2850, 0.5_2543, 0.5_4258, 0.5_2304, 0.5_2569, 0.5_4363, 0.5_5276, 0.5_6878] ) _UpperCAmelCase : Optional[int] = np.abs(expected_slice - image_slice ).max() assert max_diff < 1e-3 @nightly @require_torch_gpu class A__ ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : Any ) -> Dict: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCAmelCase ( self : Tuple , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[int]=torch.floataa , lowerCAmelCase__ : Any=0 ) -> Optional[int]: """simple docstring""" _UpperCAmelCase : Dict = torch.manual_seed(lowerCAmelCase__ ) _UpperCAmelCase : int = np.random.RandomState(lowerCAmelCase__ ).standard_normal((1, 4, 3_2, 3_2) ) _UpperCAmelCase : Optional[int] = torch.from_numpy(lowerCAmelCase__ ).to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ ) _UpperCAmelCase : List[str] = { "prompt": "A painting of a squirrel eating a burger", "latents": latents, "generator": generator, "num_inference_steps": 5_0, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def _lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" _UpperCAmelCase : List[str] = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256" ).to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) _UpperCAmelCase : str = self.get_inputs(lowerCAmelCase__ ) _UpperCAmelCase : Optional[int] = pipe(**lowerCAmelCase__ ).images[0] _UpperCAmelCase : int = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy" ) _UpperCAmelCase : Any = np.abs(expected_image - image ).max() assert max_diff < 1e-3
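# A minimal inference sketch for the pipeline under test; the checkpoint name is
# taken from the slow tests above, while the prompt, step count, and output path
# are illustrative choices.
import torch
from diffusers import LDMTextToImagePipeline

pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
image = pipe("A painting of a squirrel eating a burger", num_inference_steps=50).images[0]
image.save("squirrel.png")  # hypothetical output file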
"""FizzBuzz as a function: returns the space-separated sequence as one string."""


def fizz_buzz(number: int, iterations: int) -> str:
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)
        # print(out)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
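# Example call against the function defined above; note the result carries a
# trailing space, since the loop appends one after every number.
print(fizz_buzz(1, 15))
# -> "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz "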
'''simple docstring''' from __future__ import annotations from typing import Any class A__ : """simple docstring""" def __init__( self : Any , lowerCAmelCase__ : int ) -> None: """simple docstring""" _UpperCAmelCase : List[str] = num_of_nodes _UpperCAmelCase : list[list[int]] = [] _UpperCAmelCase : dict[int, int] = {} def _lowerCAmelCase ( self : Optional[int] , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int ) -> None: """simple docstring""" self.m_edges.append([u_node, v_node, weight] ) def _lowerCAmelCase ( self : str , lowerCAmelCase__ : int ) -> int: """simple docstring""" if self.m_component[u_node] == u_node: return u_node return self.find_component(self.m_component[u_node] ) def _lowerCAmelCase ( self : List[str] , lowerCAmelCase__ : int ) -> None: """simple docstring""" if self.m_component[u_node] != u_node: for k in self.m_component: _UpperCAmelCase : str = self.find_component(lowerCAmelCase__ ) def _lowerCAmelCase ( self : List[str] , lowerCAmelCase__ : list[int] , lowerCAmelCase__ : int , lowerCAmelCase__ : int ) -> None: """simple docstring""" if component_size[u_node] <= component_size[v_node]: _UpperCAmelCase : List[Any] = v_node component_size[v_node] += component_size[u_node] self.set_component(lowerCAmelCase__ ) elif component_size[u_node] >= component_size[v_node]: _UpperCAmelCase : Union[str, Any] = self.find_component(lowerCAmelCase__ ) component_size[u_node] += component_size[v_node] self.set_component(lowerCAmelCase__ ) def _lowerCAmelCase ( self : Union[str, Any] ) -> None: """simple docstring""" _UpperCAmelCase : Union[str, Any] = [] _UpperCAmelCase : Optional[Any] = 0 _UpperCAmelCase : list[Any] = [-1] * self.m_num_of_nodes # A list of components (initialized to all of the nodes) for node in range(self.m_num_of_nodes ): self.m_component.update({node: node} ) component_size.append(1 ) _UpperCAmelCase : Dict = self.m_num_of_nodes while num_of_components > 1: for edge in self.m_edges: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[str] = edge _UpperCAmelCase : int = self.m_component[u] _UpperCAmelCase : Dict = self.m_component[v] if u_component != v_component: for component in (u_component, v_component): if ( minimum_weight_edge[component] == -1 or minimum_weight_edge[component][2] > w ): _UpperCAmelCase : List[str] = [u, v, w] for edge in minimum_weight_edge: if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = edge _UpperCAmelCase : Union[str, Any] = self.m_component[u] _UpperCAmelCase : List[str] = self.m_component[v] if u_component != v_component: mst_weight += w self.union(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) print(F"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" ) num_of_components -= 1 _UpperCAmelCase : Dict = [-1] * self.m_num_of_nodes print(F"""The total weight of the minimal spanning tree is: {mst_weight}""" ) def __UpperCAmelCase ( ): pass if __name__ == "__main__": import doctest doctest.testmod()
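# A hedged usage sketch for the union-by-size Boruvka implementation above. The
# obfuscated class and method names are assumed to be Graph, add_edge, and
# boruvka (only find_component, set_component, and union are confirmed by the
# internal call sites); the graph and its weights are illustrative.
g = Graph(4)           # assumed name for the class defined above
g.add_edge(0, 1, 10)   # assumed name for the edge-append method
g.add_edge(0, 2, 6)
g.add_edge(0, 3, 5)
g.add_edge(1, 3, 15)
g.add_edge(2, 3, 4)
g.boruvka()            # prints each chosen edge and a total MST weight of 19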
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional, Union import datasets import numpy as np import torch from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.trainer_utils import get_last_checkpoint from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.31.0') __a = logging.getLogger(__name__) @dataclass class A__ : """simple docstring""" UpperCamelCase_ : str = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) UpperCamelCase_ : Optional[str] = field( default=UpperCamelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) UpperCamelCase_ : Optional[str] = field( default=UpperCamelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) UpperCamelCase_ : Optional[str] = field( default=UpperCamelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) UpperCamelCase_ : bool = field( default=UpperCamelCase , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , ) UpperCamelCase_ : str = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) UpperCamelCase_ : bool = field( default=UpperCamelCase , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) @dataclass class A__ : """simple docstring""" UpperCamelCase_ : Optional[str] = field(default=UpperCamelCase , metadata={'''help''': '''The input training data file (a text file).'''} ) UpperCamelCase_ : Optional[str] = field( default=UpperCamelCase , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , ) UpperCamelCase_ : bool = field( default=UpperCamelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) UpperCamelCase_ : Optional[int] = field( default=UpperCamelCase , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , ) UpperCamelCase_ : Optional[int] = field( default=UpperCamelCase , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. If passed, sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) UpperCamelCase_ : bool = field( default=UpperCamelCase , metadata={ '''help''': ( '''Whether to pad all samples to the maximum sentence length. ''' '''If False, will pad the samples dynamically when batching to the maximum length in the batch. 
More ''' '''efficient on GPU but very bad for TPU.''' ) } , ) UpperCamelCase_ : Optional[int] = field( default=UpperCamelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) UpperCamelCase_ : Optional[int] = field( default=UpperCamelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def _lowerCAmelCase ( self : Any ) -> Any: """simple docstring""" if self.train_file is not None: _UpperCAmelCase : List[Any] = self.train_file.split("." )[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: _UpperCAmelCase : List[str] = self.validation_file.split("." )[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." @dataclass class A__ : """simple docstring""" UpperCamelCase_ : PreTrainedTokenizerBase UpperCamelCase_ : Union[bool, str, PaddingStrategy] = True UpperCamelCase_ : Optional[int] = None UpperCamelCase_ : Optional[int] = None def __call__( self : List[Any] , lowerCAmelCase__ : List[str] ) -> List[str]: """simple docstring""" _UpperCAmelCase : int = "label" if "label" in features[0].keys() else "labels" _UpperCAmelCase : Dict = [feature.pop(lowerCAmelCase__ ) for feature in features] _UpperCAmelCase : str = len(lowerCAmelCase__ ) _UpperCAmelCase : int = len(features[0]["input_ids"] ) _UpperCAmelCase : str = [ [{k: v[i] for k, v in feature.items()} for i in range(lowerCAmelCase__ )] for feature in features ] _UpperCAmelCase : List[str] = list(chain(*lowerCAmelCase__ ) ) _UpperCAmelCase : Any = self.tokenizer.pad( lowerCAmelCase__ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , ) # Un-flatten _UpperCAmelCase : Any = {k: v.view(lowerCAmelCase__ , lowerCAmelCase__ , -1 ) for k, v in batch.items()} # Add back labels _UpperCAmelCase : List[str] = torch.tensor(lowerCAmelCase__ , dtype=torch.intaa ) return batch def __UpperCAmelCase ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _UpperCAmelCase : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_swag", a_, a_ ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout )], ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() _UpperCAmelCase : Optional[int] = training_args.get_process_log_level() logger.setLevel(a_ ) datasets.utils.logging.set_verbosity(a_ ) transformers.utils.logging.set_verbosity(a_ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(f"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. _UpperCAmelCase : Any = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _UpperCAmelCase : Any = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. """ "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.train_file is not None or data_args.validation_file is not None: _UpperCAmelCase : Union[str, Any] = {} if data_args.train_file is not None: _UpperCAmelCase : str = data_args.train_file if data_args.validation_file is not None: _UpperCAmelCase : Optional[Any] = data_args.validation_file _UpperCAmelCase : Dict = data_args.train_file.split("." )[-1] _UpperCAmelCase : Optional[int] = load_dataset( a_, data_files=a_, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) else: # Downloading and loading the swag dataset from the hub. _UpperCAmelCase : Dict = load_dataset( "swag", "regular", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_UpperCAmelCase : Any = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) _UpperCAmelCase : Any = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) _UpperCAmelCase : str = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path ), config=a_, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) # When using your own dataset or a different dataset from swag, you will probably need to change this. _UpperCAmelCase : Optional[Any] = [f"""ending{i}""" for i in range(4 )] _UpperCAmelCase : List[Any] = "sent1" _UpperCAmelCase : Optional[int] = "sent2" if data_args.max_seq_length is None: _UpperCAmelCase : List[str] = tokenizer.model_max_length if max_seq_length > 1_024: logger.warning( "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value" " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can" " override this default with `--block_size xxx`." ) _UpperCAmelCase : Dict = 1_024 else: if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the""" f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" ) _UpperCAmelCase : Dict = min(data_args.max_seq_length, tokenizer.model_max_length ) # Preprocessing the datasets. 
def preprocess_function(a_: Union[str, Any] ): _UpperCAmelCase : Optional[int] = [[context] * 4 for context in examples[context_name]] _UpperCAmelCase : Tuple = examples[question_header_name] _UpperCAmelCase : Optional[Any] = [ [f"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(a_ ) ] # Flatten out _UpperCAmelCase : List[str] = list(chain(*a_ ) ) _UpperCAmelCase : Dict = list(chain(*a_ ) ) # Tokenize _UpperCAmelCase : List[Any] = tokenizer( a_, a_, truncation=a_, max_length=a_, padding="max_length" if data_args.pad_to_max_length else False, ) # Un-flatten return {k: [v[i : i + 4] for i in range(0, len(a_ ), 4 )] for k, v in tokenized_examples.items()} if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset" ) _UpperCAmelCase : int = raw_datasets["train"] if data_args.max_train_samples is not None: _UpperCAmelCase : Optional[Any] = min(len(a_ ), data_args.max_train_samples ) _UpperCAmelCase : List[Any] = train_dataset.select(range(a_ ) ) with training_args.main_process_first(desc="train dataset map pre-processing" ): _UpperCAmelCase : Union[str, Any] = train_dataset.map( a_, batched=a_, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, ) if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset" ) _UpperCAmelCase : Dict = raw_datasets["validation"] if data_args.max_eval_samples is not None: _UpperCAmelCase : int = min(len(a_ ), data_args.max_eval_samples ) _UpperCAmelCase : List[str] = eval_dataset.select(range(a_ ) ) with training_args.main_process_first(desc="validation dataset map pre-processing" ): _UpperCAmelCase : Optional[int] = eval_dataset.map( a_, batched=a_, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, ) # Data collator _UpperCAmelCase : Tuple = ( default_data_collator if data_args.pad_to_max_length else DataCollatorForMultipleChoice(tokenizer=a_, pad_to_multiple_of=8 if training_args.fpaa else None ) ) # Metric def compute_metrics(a_: Tuple ): _UpperCAmelCase , _UpperCAmelCase : Tuple = eval_predictions _UpperCAmelCase : Union[str, Any] = np.argmax(a_, axis=1 ) return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()} # Initialize our Trainer _UpperCAmelCase : Any = Trainer( model=a_, args=a_, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=a_, data_collator=a_, compute_metrics=a_, ) # Training if training_args.do_train: _UpperCAmelCase : Optional[Any] = None if training_args.resume_from_checkpoint is not None: _UpperCAmelCase : List[Any] = training_args.resume_from_checkpoint elif last_checkpoint is not None: _UpperCAmelCase : List[str] = last_checkpoint _UpperCAmelCase : Any = trainer.train(resume_from_checkpoint=a_ ) trainer.save_model() # Saves the tokenizer too for easy upload _UpperCAmelCase : str = train_result.metrics _UpperCAmelCase : List[str] = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(a_ ) ) _UpperCAmelCase : Union[str, Any] = min(a_, len(a_ ) ) trainer.log_metrics("train", a_ ) trainer.save_metrics("train", a_ ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***" ) _UpperCAmelCase : List[Any] = trainer.evaluate() _UpperCAmelCase : int = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(a_ ) 
_UpperCAmelCase : Tuple = min(a_, len(a_ ) ) trainer.log_metrics("eval", a_ ) trainer.save_metrics("eval", a_ ) _UpperCAmelCase : int = { "finetuned_from": model_args.model_name_or_path, "tasks": "multiple-choice", "dataset_tags": "swag", "dataset_args": "regular", "dataset": "SWAG", "language": "en", } if training_args.push_to_hub: trainer.push_to_hub(**a_ ) else: trainer.create_model_card(**a_ ) def __UpperCAmelCase ( a_: int ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
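# A tiny sketch of the flatten/un-flatten trick used by both preprocess_function
# and the collator above: the 4 candidate endings per example are tokenized as
# one flat batch, then regrouped 4 at a time. The token ids are made up.
flat_ids = [[101, 7], [101, 8], [101, 9], [101, 10], [101, 11], [101, 12], [101, 13], [101, 14]]
grouped = [flat_ids[i : i + 4] for i in range(0, len(flat_ids), 4)]
assert grouped[0] == [[101, 7], [101, 8], [101, 9], [101, 10]]  # example 0, choices 0-3
assert len(grouped) == 2                                        # 8 sequences -> 2 examples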
'''simple docstring''' from collections.abc import Generator from math import sin def __UpperCAmelCase ( a_: bytes ): if len(a_ ) != 32: raise ValueError("Input must be of length 32" ) _UpperCAmelCase : Optional[int] = B"" for i in [3, 2, 1, 0]: little_endian += string_aa[8 * i : 8 * i + 8] return little_endian def __UpperCAmelCase ( a_: int ): if i < 0: raise ValueError("Input must be non-negative" ) _UpperCAmelCase : Any = format(a_, "08x" )[-8:] _UpperCAmelCase : Optional[Any] = B"" for i in [3, 2, 1, 0]: little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" ) return little_endian_hex def __UpperCAmelCase ( a_: bytes ): _UpperCAmelCase : Union[str, Any] = B"" for char in message: bit_string += format(a_, "08b" ).encode("utf-8" ) _UpperCAmelCase : int = format(len(a_ ), "064b" ).encode("utf-8" ) # Pad bit_string to a multiple of 512 chars bit_string += b"1" while len(a_ ) % 512 != 448: bit_string += b"0" bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] ) return bit_string def __UpperCAmelCase ( a_: bytes ): if len(a_ ) % 512 != 0: raise ValueError("Input must have length that's a multiple of 512" ) for pos in range(0, len(a_ ), 512 ): _UpperCAmelCase : Optional[Any] = bit_string[pos : pos + 512] _UpperCAmelCase : int = [] for i in range(0, 512, 32 ): block_words.append(int(to_little_endian(block[i : i + 32] ), 2 ) ) yield block_words def __UpperCAmelCase ( a_: int ): if i < 0: raise ValueError("Input must be non-negative" ) _UpperCAmelCase : Optional[Any] = format(a_, "032b" ) _UpperCAmelCase : Any = "" for c in i_str: new_str += "1" if c == "0" else "0" return int(a_, 2 ) def __UpperCAmelCase ( a_: int, a_: int ): return (a + b) % 2**32 def __UpperCAmelCase ( a_: int, a_: int ): if i < 0: raise ValueError("Input must be non-negative" ) if shift < 0: raise ValueError("Shift must be non-negative" ) return ((i << shift) ^ (i >> (32 - shift))) % 2**32 def __UpperCAmelCase ( a_: bytes ): _UpperCAmelCase : Optional[int] = preprocess(a_ ) _UpperCAmelCase : str = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )] # Starting states _UpperCAmelCase : List[Any] = 0X67_452_301 _UpperCAmelCase : List[Any] = 0XEF_CDA_B89 _UpperCAmelCase : str = 0X98_BAD_CFE _UpperCAmelCase : Union[str, Any] = 0X10_325_476 _UpperCAmelCase : Dict = [ 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, ] # Process bit string in chunks, each with 16 32-char words for block_words in get_block_words(a_ ): _UpperCAmelCase : Optional[Any] = aa _UpperCAmelCase : Dict = ba _UpperCAmelCase : Optional[int] = ca _UpperCAmelCase : int = da # Hash current chunk for i in range(64 ): if i <= 15: # f = (b & c) | (not_32(b) & d) # Alternate definition for f _UpperCAmelCase : Tuple = d ^ (b & (c ^ d)) _UpperCAmelCase : int = i elif i <= 31: # f = (d & b) | (not_32(d) & c) # Alternate definition for f _UpperCAmelCase : int = c ^ (d & (b ^ c)) _UpperCAmelCase : Union[str, Any] = (5 * i + 1) % 16 elif i <= 47: _UpperCAmelCase : Dict = b ^ c ^ d _UpperCAmelCase : Any = (3 * i + 5) % 16 else: _UpperCAmelCase : Dict = c ^ (b | not_aa(a_ )) _UpperCAmelCase : List[str] = (7 * i) % 16 _UpperCAmelCase : str = (f + a + added_consts[i] + block_words[g]) % 2**32 _UpperCAmelCase : Optional[int] = d _UpperCAmelCase : str = c _UpperCAmelCase : List[str] = b _UpperCAmelCase : List[Any] = sum_aa(a_, left_rotate_aa(a_, 
shift_amounts[i] ) ) # Add hashed chunk to running total _UpperCAmelCase : Optional[int] = sum_aa(a_, a_ ) _UpperCAmelCase : List[str] = sum_aa(a_, a_ ) _UpperCAmelCase : str = sum_aa(a_, a_ ) _UpperCAmelCase : Union[str, Any] = sum_aa(a_, a_ ) _UpperCAmelCase : Optional[int] = reformat_hex(a_ ) + reformat_hex(a_ ) + reformat_hex(a_ ) + reformat_hex(a_ ) return digest if __name__ == "__main__": import doctest doctest.testmod()
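# Reference vector for the algorithm above: any correct MD5 implementation must
# reproduce hashlib's digest, e.g. the well-known hash of the empty message.
import hashlib

assert hashlib.md5(b"").hexdigest() == "d41d8cd98f00b204e9800998ecf8427e"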
'''simple docstring''' import argparse import pytorch_lightning as pl import torch from torch import nn from transformers import LongformerForQuestionAnswering, LongformerModel class A__ ( pl.LightningModule ): """simple docstring""" def __init__( self : Any , lowerCAmelCase__ : Optional[Any] ) -> str: """simple docstring""" super().__init__() _UpperCAmelCase : List[str] = model _UpperCAmelCase : Dict = 2 _UpperCAmelCase : Tuple = nn.Linear(self.model.config.hidden_size , self.num_labels ) def _lowerCAmelCase ( self : Tuple ) -> int: """simple docstring""" pass def __UpperCAmelCase ( a_: str, a_: str, a_: str ): # load longformer model from model identifier _UpperCAmelCase : int = LongformerModel.from_pretrained(a_ ) _UpperCAmelCase : Any = LightningModel(a_ ) _UpperCAmelCase : int = torch.load(a_, map_location=torch.device("cpu" ) ) lightning_model.load_state_dict(ckpt["state_dict"] ) # init longformer question answering model _UpperCAmelCase : List[str] = LongformerForQuestionAnswering.from_pretrained(a_ ) # transfer weights longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() ) longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() ) longformer_for_qa.eval() # save model longformer_for_qa.save_pretrained(a_ ) print(f"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( '--longformer_model', default=None, type=str, required=True, help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.', ) parser.add_argument( '--longformer_question_answering_ckpt_path', default=None, type=str, required=True, help='Path the official PyTorch Lightning Checkpoint.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) __a = parser.parse_args() convert_longformer_qa_checkpoint_to_pytorch( args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path )
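# Once the converter above has run, the dump folder loads like any hub model;
# "converted-longformer-qa/" is a hypothetical --pytorch_dump_folder_path value.
from transformers import LongformerForQuestionAnswering

model = LongformerForQuestionAnswering.from_pretrained("converted-longformer-qa/")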
"""Manhattan (taxicab) distance between two points in n-dimensional space."""


def manhattan_distance(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list[float]) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
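# Worked examples for the functions defined above.
assert manhattan_distance([1, 1], [2, 2]) == 2.0              # |1-2| + |1-2|
assert manhattan_distance_one_liner([1.5, 2], [3, 4]) == 3.5  # 1.5 + 2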
'''simple docstring''' from importlib import import_module from .logging import get_logger __a = get_logger(__name__) class A__ : """simple docstring""" def __init__( self : List[str] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[Any]=None ) -> Optional[int]: """simple docstring""" _UpperCAmelCase : Any = attrs or [] if module is not None: for key in module.__dict__: if key in attrs or not key.startswith("__" ): setattr(self , lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) _UpperCAmelCase : int = module._original_module if isinstance(lowerCAmelCase__ , _PatchedModuleObj ) else module class A__ : """simple docstring""" UpperCamelCase_ : Union[str, Any] = [] def __init__( self : int , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : str , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[int]=None ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase : List[Any] = obj _UpperCAmelCase : int = target _UpperCAmelCase : Optional[int] = new _UpperCAmelCase : Any = target.split("." )[0] _UpperCAmelCase : Optional[int] = {} _UpperCAmelCase : Dict = attrs or [] def __enter__( self : List[str] ) -> int: """simple docstring""" *_UpperCAmelCase , _UpperCAmelCase : List[str] = self.target.split("." ) # Patch modules: # it's used to patch attributes of submodules like "os.path.join"; # in this case we need to patch "os" and "os.path" for i in range(len(lowerCAmelCase__ ) ): try: _UpperCAmelCase : int = import_module(".".join(submodules[: i + 1] ) ) except ModuleNotFoundError: continue # We iterate over all the globals in self.obj in case we find "os" or "os.path" for attr in self.obj.__dir__(): _UpperCAmelCase : List[Any] = getattr(self.obj , lowerCAmelCase__ ) # We don't check for the name of the global, but rather if its value *is* "os" or "os.path". # This allows to patch renamed modules like "from os import path as ospath". if obj_attr is submodule or ( (isinstance(lowerCAmelCase__ , _PatchedModuleObj ) and obj_attr._original_module is submodule) ): _UpperCAmelCase : Tuple = obj_attr # patch at top level setattr(self.obj , lowerCAmelCase__ , _PatchedModuleObj(lowerCAmelCase__ , attrs=self.attrs ) ) _UpperCAmelCase : List[Any] = getattr(self.obj , lowerCAmelCase__ ) # construct lower levels patches for key in submodules[i + 1 :]: setattr(lowerCAmelCase__ , lowerCAmelCase__ , _PatchedModuleObj(getattr(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , attrs=self.attrs ) ) _UpperCAmelCase : Any = getattr(lowerCAmelCase__ , lowerCAmelCase__ ) # finally set the target attribute setattr(lowerCAmelCase__ , lowerCAmelCase__ , self.new ) # Patch attribute itself: # it's used for builtins like "open", # and also to patch "os.path.join" we may also need to patch "join" # itself if it was imported as "from os.path import join". if submodules: # if it's an attribute of a submodule like "os.path.join" try: _UpperCAmelCase : Dict = getattr(import_module(".".join(lowerCAmelCase__ ) ) , lowerCAmelCase__ ) except (AttributeError, ModuleNotFoundError): return # We iterate over all the globals in self.obj in case we find "os.path.join" for attr in self.obj.__dir__(): # We don't check for the name of the global, but rather if its value *is* "os.path.join". # This allows to patch renamed attributes like "from os.path import join as pjoin". 
if getattr(self.obj , lowerCAmelCase__ ) is attr_value: _UpperCAmelCase : Optional[Any] = getattr(self.obj , lowerCAmelCase__ ) setattr(self.obj , lowerCAmelCase__ , self.new ) elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open" _UpperCAmelCase : Dict = globals()["__builtins__"][target_attr] setattr(self.obj , lowerCAmelCase__ , self.new ) else: raise RuntimeError(F"""Tried to patch attribute {target_attr} instead of a submodule.""" ) def __exit__( self : Optional[int] , *lowerCAmelCase__ : List[str] ) -> Union[str, Any]: """simple docstring""" for attr in list(self.original ): setattr(self.obj , lowerCAmelCase__ , self.original.pop(lowerCAmelCase__ ) ) def _lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" self.__enter__() self._active_patches.append(self ) def _lowerCAmelCase ( self : Optional[int] ) -> Tuple: """simple docstring""" try: self._active_patches.remove(self ) except ValueError: # If the patch hasn't been started this will fail return None return self.__exit__()
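# A hedged usage sketch for the context manager above, assuming it is datasets'
# patch_submodule (the class name is obfuscated in this copy). A throwaway module
# object stands in for the module whose "os.path.join" we want to intercept.
import os
import types

mod = types.ModuleType("demo")
mod.os = os  # what "import os" would leave behind in the demo module

with patch_submodule(mod, "os.path.join", lambda *p: "/patched"):
    assert mod.os.path.join("a", "b") == "/patched"
assert mod.os.path.join("a", "b") == os.path.join("a", "b")  # restored on exit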
"""Project Euler 21 style: sum of all amicable numbers below a limit."""
from math import sqrt


def sum_of_divisors(n: int) -> int:
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10_000) -> int:
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
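# Sanity checks for the functions above: 220 and 284 form the classic amicable
# pair, and 31626 is the known answer for the default limit of 10_000.
assert sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220
assert solution() == 31626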
'''simple docstring''' import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal __a = datasets.utils.logging.get_logger(__name__) __a = ['names', 'prefix'] __a = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols'] __a = ['encoding_errors', 'on_bad_lines'] __a = ['date_format'] @dataclass class A__ ( datasets.BuilderConfig ): """simple docstring""" UpperCamelCase_ : str = "," UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : Optional[Union[int, List[int], str]] = "infer" UpperCamelCase_ : Optional[List[str]] = None UpperCamelCase_ : Optional[List[str]] = None UpperCamelCase_ : Optional[Union[int, str, List[int], List[str]]] = None UpperCamelCase_ : Optional[Union[List[int], List[str]]] = None UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : bool = True UpperCamelCase_ : Optional[Literal["c", "python", "pyarrow"]] = None UpperCamelCase_ : Dict[Union[int, str], Callable[[Any], Any]] = None UpperCamelCase_ : Optional[list] = None UpperCamelCase_ : Optional[list] = None UpperCamelCase_ : bool = False UpperCamelCase_ : Optional[Union[int, List[int]]] = None UpperCamelCase_ : Optional[int] = None UpperCamelCase_ : Optional[Union[str, List[str]]] = None UpperCamelCase_ : bool = True UpperCamelCase_ : bool = True UpperCamelCase_ : bool = False UpperCamelCase_ : bool = True UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : str = "." UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : str = '"' UpperCamelCase_ : int = 0 UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : bool = True UpperCamelCase_ : bool = True UpperCamelCase_ : int = 0 UpperCamelCase_ : bool = True UpperCamelCase_ : bool = False UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : int = 1_00_00 UpperCamelCase_ : Optional[datasets.Features] = None UpperCamelCase_ : Optional[str] = "strict" UpperCamelCase_ : Literal["error", "warn", "skip"] = "error" UpperCamelCase_ : Optional[str] = None def _lowerCAmelCase ( self : str ) -> Tuple: """simple docstring""" if self.delimiter is not None: _UpperCAmelCase : Any = self.delimiter if self.column_names is not None: _UpperCAmelCase : List[Any] = self.column_names @property def _lowerCAmelCase ( self : Optional[int] ) -> Optional[int]: """simple docstring""" _UpperCAmelCase : Dict = { "sep": self.sep, "header": self.header, "names": self.names, "index_col": self.index_col, "usecols": self.usecols, "prefix": self.prefix, "mangle_dupe_cols": self.mangle_dupe_cols, "engine": self.engine, "converters": self.converters, "true_values": self.true_values, "false_values": self.false_values, "skipinitialspace": self.skipinitialspace, "skiprows": self.skiprows, "nrows": self.nrows, "na_values": self.na_values, "keep_default_na": self.keep_default_na, "na_filter": self.na_filter, "verbose": self.verbose, "skip_blank_lines": self.skip_blank_lines, "thousands": self.thousands, "decimal": self.decimal, "lineterminator": self.lineterminator, "quotechar": self.quotechar, "quoting": self.quoting, "escapechar": self.escapechar, "comment": self.comment, "encoding": self.encoding, "dialect": self.dialect, "error_bad_lines": self.error_bad_lines, "warn_bad_lines": self.warn_bad_lines, 
"skipfooter": self.skipfooter, "doublequote": self.doublequote, "memory_map": self.memory_map, "float_precision": self.float_precision, "chunksize": self.chunksize, "encoding_errors": self.encoding_errors, "on_bad_lines": self.on_bad_lines, "date_format": self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , lowerCAmelCase__ ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class A__ ( datasets.ArrowBasedBuilder ): """simple docstring""" UpperCamelCase_ : int = CsvConfig def _lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" return datasets.DatasetInfo(features=self.config.features ) def _lowerCAmelCase ( self : Tuple , lowerCAmelCase__ : str ) -> List[str]: """simple docstring""" if not self.config.data_files: raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) _UpperCAmelCase : List[str] = dl_manager.download_and_extract(self.config.data_files ) if isinstance(lowerCAmelCase__ , (str, list, tuple) ): _UpperCAmelCase : int = data_files if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCAmelCase : Any = [files] _UpperCAmelCase : List[Any] = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )] _UpperCAmelCase : Optional[Any] = [] for split_name, files in data_files.items(): if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCAmelCase : str = [files] _UpperCAmelCase : Any = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files] splits.append(datasets.SplitGenerator(name=lowerCAmelCase__ , gen_kwargs={"files": files} ) ) return splits def _lowerCAmelCase ( self : List[Any] , lowerCAmelCase__ : pa.Table ) -> pa.Table: """simple docstring""" if self.config.features is not None: _UpperCAmelCase : Tuple = self.config.features.arrow_schema if all(not require_storage_cast(lowerCAmelCase__ ) for feature in self.config.features.values() ): # cheaper cast _UpperCAmelCase : Any = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=lowerCAmelCase__ ) else: # more expensive cast; allows str <-> int/float or str to Audio for example _UpperCAmelCase : int = table_cast(lowerCAmelCase__ , lowerCAmelCase__ ) return pa_table def _lowerCAmelCase ( self : Dict , lowerCAmelCase__ : Dict ) -> Dict: """simple docstring""" _UpperCAmelCase : int = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str _UpperCAmelCase : Optional[Any] = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(lowerCAmelCase__ ) else object for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() ) } if schema is not None else None ) for file_idx, file in 
enumerate(itertools.chain.from_iterable(lowerCAmelCase__ ) ): _UpperCAmelCase : Optional[Any] = pd.read_csv(lowerCAmelCase__ , iterator=lowerCAmelCase__ , dtype=lowerCAmelCase__ , **self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(lowerCAmelCase__ ): _UpperCAmelCase : Optional[int] = pa.Table.from_pandas(lowerCAmelCase__ ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(lowerCAmelCase__ ) except ValueError as e: logger.error(F"""Failed to read file '{file}' with error {type(lowerCAmelCase__ )}: {e}""" ) raise
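# How the config above surfaces to users, as a hedged sketch: pandas read_csv
# keyword arguments pass straight through load_dataset to CsvConfig; "data.csv"
# is a hypothetical local file.
from datasets import load_dataset

ds = load_dataset("csv", data_files="data.csv", sep=";", skiprows=1)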
"""Maximum sum of non-adjacent elements of a list (dynamic programming)."""
from __future__ import annotations


def maximum_non_adjacent_sum(nums: list[int]) -> int:
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        # Either extend a selection that excluded the previous element,
        # or carry forward the best selection so far without taking `num`.
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
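# Worked examples for the function above: 1 + 3 + 5 beats every other selection
# of pairwise non-adjacent elements.
assert maximum_non_adjacent_sum([1, 2, 3, 4, 5]) == 9
assert maximum_non_adjacent_sum([]) == 0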
'''simple docstring''' import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Image from .base import TaskTemplate @dataclass(frozen=UpperCamelCase ) class A__ ( UpperCamelCase ): """simple docstring""" UpperCamelCase_ : str = field(default='''image-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} ) UpperCamelCase_ : ClassVar[Features] = Features({'''image''': Image()} ) UpperCamelCase_ : ClassVar[Features] = Features({'''labels''': ClassLabel} ) UpperCamelCase_ : str = "image" UpperCamelCase_ : str = "labels" def _lowerCAmelCase ( self : Dict , lowerCAmelCase__ : Tuple ) -> Dict: """simple docstring""" if self.label_column not in features: raise ValueError(F"""Column {self.label_column} is not present in features.""" ) if not isinstance(features[self.label_column] , lowerCAmelCase__ ): raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""" ) _UpperCAmelCase : Optional[int] = copy.deepcopy(self ) _UpperCAmelCase : Optional[int] = self.label_schema.copy() _UpperCAmelCase : List[str] = features[self.label_column] _UpperCAmelCase : Optional[Any] = label_schema return task_template @property def _lowerCAmelCase ( self : str ) -> Dict[str, str]: """simple docstring""" return { self.image_column: "image", self.label_column: "labels", }
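# A hedged sketch of the column alignment performed above, assuming the frozen
# dataclass is datasets' ImageClassification template and its obfuscated method
# is align_with_features, as in the upstream source.
from datasets import ClassLabel, Features, Image

features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
template = ImageClassification()                  # assumed class name
aligned = template.align_with_features(features)  # assumed method name
print(aligned.label_schema["labels"].names)       # ['cat', 'dog']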
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() __a = logging.get_logger(__name__) def __UpperCAmelCase ( a_: List[str] ): _UpperCAmelCase : Union[str, Any] = OrderedDict() for key, value in state_dict.items(): if key.startswith("module.encoder" ): _UpperCAmelCase : Optional[int] = key.replace("module.encoder", "glpn.encoder" ) if key.startswith("module.decoder" ): _UpperCAmelCase : List[Any] = key.replace("module.decoder", "decoder.stages" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 _UpperCAmelCase : int = key[key.find("patch_embed" ) + len("patch_embed" )] _UpperCAmelCase : Union[str, Any] = key.replace(f"""patch_embed{idx}""", f"""patch_embeddings.{int(a_ )-1}""" ) if "norm" in key: _UpperCAmelCase : Union[str, Any] = key.replace("norm", "layer_norm" ) if "glpn.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 _UpperCAmelCase : str = key[key.find("glpn.encoder.layer_norm" ) + len("glpn.encoder.layer_norm" )] _UpperCAmelCase : Optional[Any] = key.replace(f"""layer_norm{idx}""", f"""layer_norm.{int(a_ )-1}""" ) if "layer_norm1" in key: _UpperCAmelCase : Union[str, Any] = key.replace("layer_norm1", "layer_norm_1" ) if "layer_norm2" in key: _UpperCAmelCase : List[Any] = key.replace("layer_norm2", "layer_norm_2" ) if "block" in key: # replace for example block1 by block.0 _UpperCAmelCase : Optional[Any] = key[key.find("block" ) + len("block" )] _UpperCAmelCase : List[str] = key.replace(f"""block{idx}""", f"""block.{int(a_ )-1}""" ) if "attn.q" in key: _UpperCAmelCase : Optional[int] = key.replace("attn.q", "attention.self.query" ) if "attn.proj" in key: _UpperCAmelCase : List[str] = key.replace("attn.proj", "attention.output.dense" ) if "attn" in key: _UpperCAmelCase : Dict = key.replace("attn", "attention.self" ) if "fc1" in key: _UpperCAmelCase : List[Any] = key.replace("fc1", "dense1" ) if "fc2" in key: _UpperCAmelCase : List[Any] = key.replace("fc2", "dense2" ) if "linear_pred" in key: _UpperCAmelCase : Any = key.replace("linear_pred", "classifier" ) if "linear_fuse" in key: _UpperCAmelCase : Dict = key.replace("linear_fuse.conv", "linear_fuse" ) _UpperCAmelCase : List[str] = key.replace("linear_fuse.bn", "batch_norm" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 _UpperCAmelCase : List[Any] = key[key.find("linear_c" ) + len("linear_c" )] _UpperCAmelCase : Tuple = key.replace(f"""linear_c{idx}""", f"""linear_c.{int(a_ )-1}""" ) if "bot_conv" in key: _UpperCAmelCase : Union[str, Any] = key.replace("bot_conv", "0.convolution" ) if "skip_conv1" in key: _UpperCAmelCase : Optional[int] = key.replace("skip_conv1", "1.convolution" ) if "skip_conv2" in key: _UpperCAmelCase : Optional[int] = key.replace("skip_conv2", "2.convolution" ) if "fusion1" in key: _UpperCAmelCase : List[str] = key.replace("fusion1", "1.fusion" ) if "fusion2" in key: _UpperCAmelCase : List[str] = key.replace("fusion2", "2.fusion" ) if "fusion3" in key: _UpperCAmelCase : Optional[Any] = key.replace("fusion3", "3.fusion" ) if "fusion" in key and "conv" in key: _UpperCAmelCase : List[Any] = key.replace("conv", "convolutional_layer" ) if key.startswith("module.last_layer_depth" ): _UpperCAmelCase : Optional[int] = key.replace("module.last_layer_depth", "head.head" ) _UpperCAmelCase : int = 
value return new_state_dict def __UpperCAmelCase ( a_: str, a_: List[Any] ): # for each of the encoder blocks: for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) _UpperCAmelCase : Tuple = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""" ) _UpperCAmelCase : Union[str, Any] = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""" ) # next, add keys and values (in that order) to the state dict _UpperCAmelCase : Optional[int] = kv_weight[ : config.hidden_sizes[i], : ] _UpperCAmelCase : Dict = kv_bias[: config.hidden_sizes[i]] _UpperCAmelCase : Optional[int] = kv_weight[ config.hidden_sizes[i] :, : ] _UpperCAmelCase : Optional[Any] = kv_bias[config.hidden_sizes[i] :] def __UpperCAmelCase ( ): _UpperCAmelCase : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg" _UpperCAmelCase : List[Any] = Image.open(requests.get(a_, stream=a_ ).raw ) return image @torch.no_grad() def __UpperCAmelCase ( a_: Tuple, a_: Any, a_: Optional[Any]=False, a_: List[Any]=None ): _UpperCAmelCase : Optional[Any] = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3] ) # load image processor (only resize + rescale) _UpperCAmelCase : Dict = GLPNImageProcessor() # prepare image _UpperCAmelCase : List[Any] = prepare_img() _UpperCAmelCase : Optional[int] = image_processor(images=a_, return_tensors="pt" ).pixel_values logger.info("Converting model..." ) # load original state dict _UpperCAmelCase : Union[str, Any] = torch.load(a_, map_location=torch.device("cpu" ) ) # rename keys _UpperCAmelCase : List[str] = rename_keys(a_ ) # key and value matrices need special treatment read_in_k_v(a_, a_ ) # create HuggingFace model and load state dict _UpperCAmelCase : List[str] = GLPNForDepthEstimation(a_ ) model.load_state_dict(a_ ) model.eval() # forward pass _UpperCAmelCase : Dict = model(a_ ) _UpperCAmelCase : List[str] = outputs.predicted_depth # verify output if model_name is not None: if "nyu" in model_name: _UpperCAmelCase : Optional[Any] = torch.tensor( [[4.41_47, 4.08_73, 4.06_73], [3.78_90, 3.28_81, 3.15_25], [3.76_74, 3.54_23, 3.49_13]] ) elif "kitti" in model_name: _UpperCAmelCase : Tuple = torch.tensor( [[3.42_91, 2.78_65, 2.51_51], [3.28_41, 2.70_21, 2.35_02], [3.11_47, 2.46_25, 2.24_81]] ) else: raise ValueError(f"""Unknown model name: {model_name}""" ) _UpperCAmelCase : Dict = torch.Size([1, 480, 640] ) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3], a_, atol=1e-4 ) print("Looks ok!" ) # finally, push to hub if required if push_to_hub: logger.info("Pushing model and image processor to the hub..." ) model.push_to_hub( repo_path_or_name=Path(a_, a_ ), organization="nielsr", commit_message="Add model", use_temp_dir=a_, ) image_processor.push_to_hub( repo_path_or_name=Path(a_, a_ ), organization="nielsr", commit_message="Add image processor", use_temp_dir=a_, ) if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.' 
) parser.add_argument( '--model_name', default='glpn-kitti', type=str, help='Name of the model in case you\'re pushing to the hub.', ) __a = parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
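The renaming logic in the conversion script above is a long chain of ordered substring replacements on checkpoint keys. A minimal, self-contained sketch of the same idiom (with a made-up toy key, not an actual GLPN checkpoint key) shows why rule order matters: the specific "attn.q" rule must fire before the general "attn" rule.

from collections import OrderedDict

def rename_keys_sketch(state_dict):
    # ordered (old, new) rules: specific patterns must run before general ones
    rules = [
        ("module.encoder", "glpn.encoder"),
        ("attn.q", "attention.self.query"),
        ("attn.proj", "attention.output.dense"),
        ("attn", "attention.self"),
    ]
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        for old, new in rules:
            if old in key:
                key = key.replace(old, new)
        new_state_dict[key] = value
    return new_state_dict

demo = OrderedDict({"module.encoder.block1.attn.q.weight": 0})  # hypothetical key
print(list(rename_keys_sketch(demo)))
# ['glpn.encoder.block1.attention.self.query.weight']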
17
1
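The read_in_k_v step above relies on the original checkpoint packing keys and values into a single matrix of shape (2 * hidden, hidden). A small sketch with a hypothetical hidden size makes the slicing explicit:

import torch

hidden = 4  # hypothetical hidden size
kv_weight = torch.randn(2 * hidden, hidden)
kv_bias = torch.randn(2 * hidden)

# first half of the rows -> key projection, second half -> value projection
k_weight, v_weight = kv_weight[:hidden, :], kv_weight[hidden:, :]
k_bias, v_bias = kv_bias[:hidden], kv_bias[hidden:]
assert k_weight.shape == v_weight.shape == (hidden, hidden)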
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaImgaImgPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class A__ ( UpperCamelCase , unittest.TestCase ): """simple docstring""" UpperCamelCase_ : int = KandinskyVaaImgaImgPipeline UpperCamelCase_ : List[str] = ['''image_embeds''', '''negative_image_embeds''', '''image'''] UpperCamelCase_ : Tuple = [ '''image_embeds''', '''negative_image_embeds''', '''image''', ] UpperCamelCase_ : str = [ '''generator''', '''height''', '''width''', '''strength''', '''guidance_scale''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] UpperCamelCase_ : Any = False @property def _lowerCAmelCase ( self : Any ) -> Tuple: """simple docstring""" return 3_2 @property def _lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" return 3_2 @property def _lowerCAmelCase ( self : Any ) -> Tuple: """simple docstring""" return self.time_input_dim @property def _lowerCAmelCase ( self : Tuple ) -> Dict: """simple docstring""" return self.time_input_dim * 4 @property def _lowerCAmelCase ( self : Dict ) -> List[str]: """simple docstring""" return 1_0_0 @property def _lowerCAmelCase ( self : List[Any] ) -> int: """simple docstring""" torch.manual_seed(0 ) _UpperCAmelCase : Dict = { "in_channels": 4, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } _UpperCAmelCase : Dict = UNetaDConditionModel(**lowerCAmelCase__ ) return model @property def _lowerCAmelCase ( self : Union[str, Any] ) -> str: """simple docstring""" return { "block_out_channels": [3_2, 6_4], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 1_2, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def _lowerCAmelCase ( self : Any ) -> Union[str, Any]: """simple docstring""" torch.manual_seed(0 ) _UpperCAmelCase : Union[str, Any] = VQModel(**self.dummy_movq_kwargs ) return model def _lowerCAmelCase ( self : List[Any] ) -> Optional[int]: """simple docstring""" _UpperCAmelCase : Optional[int] = self.dummy_unet _UpperCAmelCase : str = self.dummy_movq _UpperCAmelCase : Optional[int] = { "num_train_timesteps": 1_0_0_0, "beta_schedule": "linear", "beta_start": 0.0_0085, "beta_end": 0.012, "clip_sample": False, "set_alpha_to_one": False, "steps_offset": 0, "prediction_type": 
"epsilon", "thresholding": False, } _UpperCAmelCase : Union[str, Any] = DDIMScheduler(**lowerCAmelCase__ ) _UpperCAmelCase : List[Any] = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def _lowerCAmelCase ( self : Tuple , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Union[str, Any]=0 ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase : int = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ ) _UpperCAmelCase : List[str] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( lowerCAmelCase__ ) # create init_image _UpperCAmelCase : Optional[int] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ ) _UpperCAmelCase : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] _UpperCAmelCase : Dict = Image.fromarray(np.uinta(lowerCAmelCase__ ) ).convert("RGB" ).resize((2_5_6, 2_5_6) ) if str(lowerCAmelCase__ ).startswith("mps" ): _UpperCAmelCase : int = torch.manual_seed(lowerCAmelCase__ ) else: _UpperCAmelCase : int = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ ) _UpperCAmelCase : Tuple = { "image": init_image, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 6_4, "width": 6_4, "num_inference_steps": 1_0, "guidance_scale": 7.0, "strength": 0.2, "output_type": "np", } return inputs def _lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase : List[str] = "cpu" _UpperCAmelCase : List[Any] = self.get_dummy_components() _UpperCAmelCase : Optional[int] = self.pipeline_class(**lowerCAmelCase__ ) _UpperCAmelCase : Optional[Any] = pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) _UpperCAmelCase : int = pipe(**self.get_dummy_inputs(lowerCAmelCase__ ) ) _UpperCAmelCase : Any = output.images _UpperCAmelCase : str = pipe( **self.get_dummy_inputs(lowerCAmelCase__ ) , return_dict=lowerCAmelCase__ , )[0] _UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1] _UpperCAmelCase : List[str] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) _UpperCAmelCase : List[str] = np.array( [0.619_9778, 0.6398_4406, 0.4614_5785, 0.6294_4984, 0.562_2215, 0.4730_6132, 0.4744_1456, 0.460_7606, 0.4871_9263] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" @slow @require_torch_gpu class A__ ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : Optional[Any] ) -> List[Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCAmelCase ( self : Dict ) -> Tuple: """simple docstring""" _UpperCAmelCase : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_img2img_frog.npy" ) _UpperCAmelCase : Optional[int] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) _UpperCAmelCase : List[str] = "A red cartoon frog, 4k" _UpperCAmelCase : str = KandinskyVaaPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa ) pipe_prior.to(lowerCAmelCase__ ) _UpperCAmelCase : Tuple = 
KandinskyVaaImgaImgPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa ) _UpperCAmelCase : List[Any] = pipeline.to(lowerCAmelCase__ ) pipeline.set_progress_bar_config(disable=lowerCAmelCase__ ) _UpperCAmelCase : Tuple = torch.Generator(device="cpu" ).manual_seed(0 ) _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = pipe_prior( lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=5 , negative_prompt="" , ).to_tuple() _UpperCAmelCase : Dict = pipeline( image=lowerCAmelCase__ , image_embeds=lowerCAmelCase__ , negative_image_embeds=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , strength=0.2 , output_type="np" , ) _UpperCAmelCase : Optional[int] = output.images[0] assert image.shape == (7_6_8, 7_6_8, 3) assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
17
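The get_dummy_inputs method above converts a random NCHW tensor into a PIL image before feeding the img2img pipeline. A stand-alone sketch of that conversion, where torch.rand stands in for diffusers' floats_tensor helper and the values are scaled to 0-255 here for a visible image:

import numpy as np
import torch
from PIL import Image

image = torch.rand(1, 3, 64, 64)  # hypothetical stand-in for floats_tensor
# NCHW -> HWC, then to uint8 and PIL, mirroring the test's get_dummy_inputs
array = image.cpu().permute(0, 2, 3, 1)[0].numpy()
init_image = Image.fromarray(np.uint8(array * 255)).convert("RGB").resize((256, 256))
print(init_image.size)  # (256, 256)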
'''simple docstring''' import contextlib import csv import json import os import sqlitea import tarfile import textwrap import zipfile import pyarrow as pa import pyarrow.parquet as pq import pytest import datasets import datasets.config @pytest.fixture(scope="session" ) def __UpperCAmelCase ( ): _UpperCAmelCase : Optional[Any] = 10 _UpperCAmelCase : int = datasets.Features( { "tokens": datasets.Sequence(datasets.Value("string" ) ), "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"] ) ), "answers": datasets.Sequence( { "text": datasets.Value("string" ), "answer_start": datasets.Value("int32" ), } ), "id": datasets.Value("int64" ), } ) _UpperCAmelCase : List[str] = datasets.Dataset.from_dict( { "tokens": [["foo"] * 5] * n, "labels": [[1] * 5] * n, "answers": [{"answer_start": [97], "text": ["1976"]}] * 10, "id": list(range(a_ ) ), }, features=a_, ) return dataset @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Optional[int], a_: Dict ): _UpperCAmelCase : Any = str(tmp_path_factory.mktemp("data" ) / "file.arrow" ) dataset.map(cache_file_name=a_ ) return filename # FILE_CONTENT + files __a = '\\n Text data.\n Second line of data.' @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Dict ): _UpperCAmelCase : Dict = tmp_path_factory.mktemp("data" ) / "file.txt" _UpperCAmelCase : Tuple = FILE_CONTENT with open(a_, "w" ) as f: f.write(a_ ) return filename @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Union[str, Any] ): import bza _UpperCAmelCase : str = tmp_path_factory.mktemp("data" ) / "file.txt.bz2" _UpperCAmelCase : Optional[int] = bytes(a_, "utf-8" ) with bza.open(a_, "wb" ) as f: f.write(a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Union[str, Any] ): import gzip _UpperCAmelCase : str = str(tmp_path_factory.mktemp("data" ) / "file.txt.gz" ) _UpperCAmelCase : Any = bytes(a_, "utf-8" ) with gzip.open(a_, "wb" ) as f: f.write(a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: str ): if datasets.config.LZ4_AVAILABLE: import lza.frame _UpperCAmelCase : Optional[int] = tmp_path_factory.mktemp("data" ) / "file.txt.lz4" _UpperCAmelCase : str = bytes(a_, "utf-8" ) with lza.frame.open(a_, "wb" ) as f: f.write(a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: int, a_: Any ): if datasets.config.PY7ZR_AVAILABLE: import pyazr _UpperCAmelCase : Any = tmp_path_factory.mktemp("data" ) / "file.txt.7z" with pyazr.SevenZipFile(a_, "w" ) as archive: archive.write(a_, arcname=os.path.basename(a_ ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Any, a_: List[str] ): import tarfile _UpperCAmelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "file.txt.tar" with tarfile.TarFile(a_, "w" ) as f: f.add(a_, arcname=os.path.basename(a_ ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: int ): import lzma _UpperCAmelCase : List[Any] = tmp_path_factory.mktemp("data" ) / "file.txt.xz" _UpperCAmelCase : List[str] = bytes(a_, "utf-8" ) with lzma.open(a_, "wb" ) as f: f.write(a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Dict, a_: Tuple ): import zipfile _UpperCAmelCase : Tuple = tmp_path_factory.mktemp("data" ) / "file.txt.zip" with zipfile.ZipFile(a_, "w" ) as f: f.write(a_, arcname=os.path.basename(a_ ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Optional[int] ): if datasets.config.ZSTANDARD_AVAILABLE: import zstandard as zstd _UpperCAmelCase : 
Optional[int] = tmp_path_factory.mktemp("data" ) / "file.txt.zst" _UpperCAmelCase : int = bytes(a_, "utf-8" ) with zstd.open(a_, "wb" ) as f: f.write(a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Optional[int] ): _UpperCAmelCase : List[str] = tmp_path_factory.mktemp("data" ) / "file.xml" _UpperCAmelCase : Tuple = textwrap.dedent( "\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>" ) with open(a_, "w" ) as f: f.write(a_ ) return filename __a = [ {'col_1': '0', 'col_2': 0, 'col_3': 0.0}, {'col_1': '1', 'col_2': 1, 'col_3': 1.0}, {'col_1': '2', 'col_2': 2, 'col_3': 2.0}, {'col_1': '3', 'col_2': 3, 'col_3': 3.0}, ] __a = [ {'col_1': '4', 'col_2': 4, 'col_3': 4.0}, {'col_1': '5', 'col_2': 5, 'col_3': 5.0}, ] __a = { 'col_1': ['0', '1', '2', '3'], 'col_2': [0, 1, 2, 3], 'col_3': [0.0, 1.0, 2.0, 3.0], } __a = [ {'col_3': 0.0, 'col_1': '0', 'col_2': 0}, {'col_3': 1.0, 'col_1': '1', 'col_2': 1}, ] __a = [ {'col_1': 's0', 'col_2': 0, 'col_3': 0.0}, {'col_1': 's1', 'col_2': 1, 'col_3': 1.0}, {'col_1': 's2', 'col_2': 2, 'col_3': 2.0}, {'col_1': 's3', 'col_2': 3, 'col_3': 3.0}, ] @pytest.fixture(scope="session" ) def __UpperCAmelCase ( ): return DATA_DICT_OF_LISTS @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Union[str, Any] ): _UpperCAmelCase : str = datasets.Dataset.from_dict(a_ ) _UpperCAmelCase : Optional[int] = str(tmp_path_factory.mktemp("data" ) / "dataset.arrow" ) dataset.map(cache_file_name=a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: str ): _UpperCAmelCase : int = str(tmp_path_factory.mktemp("data" ) / "dataset.sqlite" ) with contextlib.closing(sqlitea.connect(a_ ) ) as con: _UpperCAmelCase : List[Any] = con.cursor() cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)" ) for item in DATA: cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values() ) ) con.commit() return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Any ): _UpperCAmelCase : Dict = str(tmp_path_factory.mktemp("data" ) / "dataset.csv" ) with open(a_, "w", newline="" ) as f: _UpperCAmelCase : Dict = csv.DictWriter(a_, fieldnames=["col_1", "col_2", "col_3"] ) writer.writeheader() for item in DATA: writer.writerow(a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Union[str, Any] ): _UpperCAmelCase : Union[str, Any] = str(tmp_path_factory.mktemp("data" ) / "dataset2.csv" ) with open(a_, "w", newline="" ) as f: _UpperCAmelCase : Optional[int] = csv.DictWriter(a_, fieldnames=["col_1", "col_2", "col_3"] ) writer.writeheader() for item in DATA: writer.writerow(a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: str, a_: str ): import bza _UpperCAmelCase : str = tmp_path_factory.mktemp("data" ) / "dataset.csv.bz2" with open(a_, "rb" ) as f: _UpperCAmelCase : Any = 
f.read() # data = bytes(FILE_CONTENT, "utf-8") with bza.open(a_, "wb" ) as f: f.write(a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Optional[int], a_: Dict, a_: Optional[int] ): _UpperCAmelCase : List[Any] = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip" with zipfile.ZipFile(a_, "w" ) as f: f.write(a_, arcname=os.path.basename(a_ ) ) f.write(a_, arcname=os.path.basename(a_ ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: List[str], a_: Union[str, Any], a_: int ): _UpperCAmelCase : int = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip" with zipfile.ZipFile(a_, "w" ) as f: f.write(a_, arcname=os.path.basename(csv_path.replace(".csv", ".CSV" ) ) ) f.write(a_, arcname=os.path.basename(csva_path.replace(".csv", ".CSV" ) ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Any, a_: Union[str, Any], a_: Tuple ): _UpperCAmelCase : Any = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.csv.zip" with zipfile.ZipFile(a_, "w" ) as f: f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) ) ) f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Tuple ): _UpperCAmelCase : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.parquet" ) _UpperCAmelCase : Dict = pa.schema( { "col_1": pa.string(), "col_2": pa.intaa(), "col_3": pa.floataa(), } ) with open(a_, "wb" ) as f: _UpperCAmelCase : Tuple = pq.ParquetWriter(a_, schema=a_ ) _UpperCAmelCase : Tuple = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(a_ ) )] for k in DATA[0]}, schema=a_ ) writer.write_table(a_ ) writer.close() return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Any ): _UpperCAmelCase : Union[str, Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.json" ) _UpperCAmelCase : str = {"data": DATA} with open(a_, "w" ) as f: json.dump(a_, a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Union[str, Any] ): _UpperCAmelCase : Optional[int] = str(tmp_path_factory.mktemp("data" ) / "dataset.json" ) _UpperCAmelCase : Dict = {"data": DATA_DICT_OF_LISTS} with open(a_, "w" ) as f: json.dump(a_, a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: int ): _UpperCAmelCase : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl" ) with open(a_, "w" ) as f: for item in DATA: f.write(json.dumps(a_ ) + "\n" ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Tuple ): _UpperCAmelCase : Any = str(tmp_path_factory.mktemp("data" ) / "dataset2.jsonl" ) with open(a_, "w" ) as f: for item in DATA: f.write(json.dumps(a_ ) + "\n" ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Any ): _UpperCAmelCase : int = str(tmp_path_factory.mktemp("data" ) / "dataset_312.jsonl" ) with open(a_, "w" ) as f: for item in DATA_312: f.write(json.dumps(a_ ) + "\n" ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Optional[Any] ): _UpperCAmelCase : Optional[int] = str(tmp_path_factory.mktemp("data" ) / "dataset-str.jsonl" ) with open(a_, "w" ) as f: for item in DATA_STR: f.write(json.dumps(a_ ) + "\n" ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Union[str, Any], a_: Any ): import gzip _UpperCAmelCase : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.txt.gz" ) with open(a_, "rb" ) as orig_file: with gzip.open(a_, "wb" ) as zipped_file: zipped_file.writelines(a_ ) 
return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Optional[Any], a_: Tuple ): import gzip _UpperCAmelCase : List[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl.gz" ) with open(a_, "rb" ) as orig_file: with gzip.open(a_, "wb" ) as zipped_file: zipped_file.writelines(a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Dict, a_: List[Any], a_: Union[str, Any] ): _UpperCAmelCase : Tuple = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.zip" with zipfile.ZipFile(a_, "w" ) as f: f.write(a_, arcname=os.path.basename(a_ ) ) f.write(a_, arcname=os.path.basename(a_ ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Union[str, Any], a_: Optional[int], a_: Optional[Any], a_: Dict ): _UpperCAmelCase : Dict = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.zip" with zipfile.ZipFile(a_, "w" ) as f: f.write(a_, arcname=os.path.join("nested", os.path.basename(a_ ) ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: List[Any], a_: Optional[int], a_: List[str] ): _UpperCAmelCase : Dict = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.jsonl.zip" with zipfile.ZipFile(a_, "w" ) as f: f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) ) ) f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: List[Any], a_: List[Any], a_: str ): _UpperCAmelCase : Optional[Any] = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.tar" with tarfile.TarFile(a_, "w" ) as f: f.add(a_, arcname=os.path.basename(a_ ) ) f.add(a_, arcname=os.path.basename(a_ ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: List[str], a_: List[Any], a_: Tuple, a_: Dict ): _UpperCAmelCase : List[Any] = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.tar" with tarfile.TarFile(a_, "w" ) as f: f.add(a_, arcname=os.path.join("nested", os.path.basename(a_ ) ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: List[str] ): _UpperCAmelCase : List[str] = ["0", "1", "2", "3"] _UpperCAmelCase : Tuple = str(tmp_path_factory.mktemp("data" ) / "dataset.txt" ) with open(a_, "w" ) as f: for item in data: f.write(item + "\n" ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Union[str, Any] ): _UpperCAmelCase : Dict = ["0", "1", "2", "3"] _UpperCAmelCase : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset2.txt" ) with open(a_, "w" ) as f: for item in data: f.write(item + "\n" ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Any ): _UpperCAmelCase : int = ["0", "1", "2", "3"] _UpperCAmelCase : str = tmp_path_factory.mktemp("data" ) / "dataset.abc" with open(a_, "w" ) as f: for item in data: f.write(item + "\n" ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Optional[Any], a_: Any, a_: Union[str, Any] ): _UpperCAmelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "dataset.text.zip" with zipfile.ZipFile(a_, "w" ) as f: f.write(a_, arcname=os.path.basename(a_ ) ) f.write(a_, arcname=os.path.basename(a_ ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Optional[int], a_: List[Any], a_: List[Any] ): _UpperCAmelCase : List[Any] = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.text.zip" with zipfile.ZipFile(a_, "w" ) as f: f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) ) ) f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) 
) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Any, a_: str, a_: Tuple ): _UpperCAmelCase : List[Any] = tmp_path_factory.mktemp("data" ) / "dataset.ext.zip" with zipfile.ZipFile(a_, "w" ) as f: f.write(a_, arcname=os.path.basename("unsupported.ext" ) ) f.write(a_, arcname=os.path.basename("unsupported_2.ext" ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Optional[Any] ): _UpperCAmelCase : List[str] = "\n".join(["First", "Second\u2029with Unicode new line", "Third"] ) _UpperCAmelCase : str = str(tmp_path_factory.mktemp("data" ) / "dataset_with_unicode_new_lines.txt" ) with open(a_, "w", encoding="utf-8" ) as f: f.write(a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( ): return os.path.join("tests", "features", "data", "test_image_rgb.jpg" ) @pytest.fixture(scope="session" ) def __UpperCAmelCase ( ): return os.path.join("tests", "features", "data", "test_audio_44100.wav" ) @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: int, a_: Optional[Any] ): _UpperCAmelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "dataset.img.zip" with zipfile.ZipFile(a_, "w" ) as f: f.write(a_, arcname=os.path.basename(a_ ) ) f.write(a_, arcname=os.path.basename(a_ ).replace(".jpg", "2.jpg" ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Tuple ): _UpperCAmelCase : Optional[Any] = tmp_path_factory.mktemp("data_dir" ) (data_dir / "subdir").mkdir() with open(data_dir / "subdir" / "train.txt", "w" ) as f: f.write("foo\n" * 10 ) with open(data_dir / "subdir" / "test.txt", "w" ) as f: f.write("bar\n" * 10 ) # hidden file with open(data_dir / "subdir" / ".test.txt", "w" ) as f: f.write("bar\n" * 10 ) # hidden directory (data_dir / ".subdir").mkdir() with open(data_dir / ".subdir" / "train.txt", "w" ) as f: f.write("foo\n" * 10 ) with open(data_dir / ".subdir" / "test.txt", "w" ) as f: f.write("bar\n" * 10 ) return data_dir
17
1
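Nearly every fixture in the conftest module above follows one pattern: a session-scoped fixture writes a small file into a pytest-managed temp directory and returns its path. A minimal, self-contained instance of that pattern:

import pytest

@pytest.fixture(scope="session")
def text_path(tmp_path_factory):
    # tmp_path_factory is pytest's built-in session-scoped temp-dir factory
    path = tmp_path_factory.mktemp("data") / "file.txt"
    path.write_text("Text data.\nSecond line of data.")
    return str(path)

def test_text_path(text_path):
    with open(text_path) as f:
        assert f.readline() == "Text data.\n"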
'''simple docstring'''
# Restored from the obfuscated dump: the constant and function names below are
# taken from their own call sites in the original text.
UNIVERSAL_GAS_CONSTANT = 8.3144598  # J / (mol * K)


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f'Vrms of Nitrogen gas at 300 K is {vrms} m/s')
17
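For reference, the quantity computed above is the Maxwell-Boltzmann root-mean-square speed:

$$v_{\mathrm{rms}} = \sqrt{\frac{3RT}{M}}$$

where R = 8.3144598 J/(mol K) is the universal gas constant, T the absolute temperature in kelvin, and M the molar mass in kg/mol (per the function's own error message). As a sanity check under that unit convention, nitrogen at 300 K with M = 0.028 kg/mol gives v_rms = sqrt(3 * 8.3144598 * 300 / 0.028) ≈ 517 m/s; note that the __main__ example passes 28, which under this convention would need to be 0.028.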
17
1
'''simple docstring''' import argparse import pytorch_lightning as pl import torch from torch import nn from transformers import LongformerForQuestionAnswering, LongformerModel class A__ ( pl.LightningModule ): """simple docstring""" def __init__( self : Any , lowerCAmelCase__ : Optional[Any] ) -> str: """simple docstring""" super().__init__() _UpperCAmelCase : List[str] = model _UpperCAmelCase : Dict = 2 _UpperCAmelCase : Tuple = nn.Linear(self.model.config.hidden_size , self.num_labels ) def _lowerCAmelCase ( self : Tuple ) -> int: """simple docstring""" pass def __UpperCAmelCase ( a_: str, a_: str, a_: str ): # load longformer model from model identifier _UpperCAmelCase : int = LongformerModel.from_pretrained(a_ ) _UpperCAmelCase : Any = LightningModel(a_ ) _UpperCAmelCase : int = torch.load(a_, map_location=torch.device("cpu" ) ) lightning_model.load_state_dict(ckpt["state_dict"] ) # init longformer question answering model _UpperCAmelCase : List[str] = LongformerForQuestionAnswering.from_pretrained(a_ ) # transfer weights longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() ) longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() ) longformer_for_qa.eval() # save model longformer_for_qa.save_pretrained(a_ ) print(f"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( '--longformer_model', default=None, type=str, required=True, help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.', ) parser.add_argument( '--longformer_question_answering_ckpt_path', default=None, type=str, required=True, help='Path the official PyTorch Lightning Checkpoint.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) __a = parser.parse_args() convert_longformer_qa_checkpoint_to_pytorch( args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path )
17
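The conversion above moves weights between architecturally matching modules with load_state_dict. The same idiom in miniature:

import torch
from torch import nn

src = nn.Linear(4, 2)
dst = nn.Linear(4, 2)
# copies parameters because the two modules have identical parameter shapes
dst.load_state_dict(src.state_dict())
assert torch.equal(src.weight, dst.weight)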
'''simple docstring''' import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: __a = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class A__ ( unittest.TestCase ): """simple docstring""" def __init__( self : int , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[Any]=7 , lowerCAmelCase__ : int=3 , lowerCAmelCase__ : List[Any]=1_8 , lowerCAmelCase__ : str=3_0 , lowerCAmelCase__ : str=4_0_0 , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : str=True , lowerCAmelCase__ : List[Any]=None , ) -> List[Any]: """simple docstring""" _UpperCAmelCase : List[Any] = size if size is not None else {"height": 2_0, "width": 2_0} _UpperCAmelCase : Optional[Any] = parent _UpperCAmelCase : Tuple = batch_size _UpperCAmelCase : str = num_channels _UpperCAmelCase : Optional[Any] = image_size _UpperCAmelCase : Dict = min_resolution _UpperCAmelCase : str = max_resolution _UpperCAmelCase : List[Any] = size _UpperCAmelCase : Union[str, Any] = do_normalize _UpperCAmelCase : Optional[Any] = do_convert_rgb _UpperCAmelCase : str = [5_1_2, 1_0_2_4, 2_0_4_8, 4_0_9_6] _UpperCAmelCase : str = patch_size if patch_size is not None else {"height": 1_6, "width": 1_6} def _lowerCAmelCase ( self : List[str] ) -> int: """simple docstring""" return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def _lowerCAmelCase ( self : Any ) -> str: """simple docstring""" _UpperCAmelCase : Dict = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg" _UpperCAmelCase : Optional[Any] = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw ).convert("RGB" ) return raw_image @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , ) @require_torch @require_vision class A__ ( UpperCamelCase , unittest.TestCase ): """simple docstring""" UpperCamelCase_ : Any = PixaStructImageProcessor if is_vision_available() else None def _lowerCAmelCase ( self : Any ) -> Optional[int]: """simple docstring""" _UpperCAmelCase : Tuple = PixaStructImageProcessingTester(self ) @property def _lowerCAmelCase ( self : Tuple ) -> int: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def _lowerCAmelCase ( self : Any ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase : str = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCAmelCase__ , "do_normalize" ) ) self.assertTrue(hasattr(lowerCAmelCase__ , "do_convert_rgb" ) ) def _lowerCAmelCase ( self : Optional[Any] ) -> Dict: """simple docstring""" _UpperCAmelCase : Optional[Any] = self.image_processor_tester.prepare_dummy_image() _UpperCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict ) _UpperCAmelCase : str = 2_0_4_8 _UpperCAmelCase : Any = image_processor(lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__ ) self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) ) def _lowerCAmelCase ( self : Dict 
) -> int: """simple docstring""" _UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , Image.Image ) # Test not batched input _UpperCAmelCase : List[str] = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _UpperCAmelCase : Union[str, Any] = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _UpperCAmelCase : str = image_processor( lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def _lowerCAmelCase ( self : Optional[int] ) -> List[str]: """simple docstring""" _UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , Image.Image ) # Test not batched input _UpperCAmelCase : Union[str, Any] = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 _UpperCAmelCase : str = True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(lowerCAmelCase__ ): _UpperCAmelCase : str = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches _UpperCAmelCase : Any = "Hello" _UpperCAmelCase : Optional[int] = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__ , header_text=lowerCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _UpperCAmelCase : List[Any] = image_processor( lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__ , header_text=lowerCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def _lowerCAmelCase ( self : List[str] ) -> List[Any]: """simple docstring""" _UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , np.ndarray ) _UpperCAmelCase : Any = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _UpperCAmelCase : int = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _UpperCAmelCase : Union[str, Any] = 
image_processor( lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def _lowerCAmelCase ( self : int ) -> str: """simple docstring""" _UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , torch.Tensor ) # Test not batched input _UpperCAmelCase : List[str] = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _UpperCAmelCase : Union[str, Any] = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _UpperCAmelCase : str = image_processor( lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , ) @require_torch @require_vision class A__ ( UpperCamelCase , unittest.TestCase ): """simple docstring""" UpperCamelCase_ : List[Any] = PixaStructImageProcessor if is_vision_available() else None def _lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase : Any = PixaStructImageProcessingTester(self , num_channels=4 ) _UpperCAmelCase : List[Any] = 3 @property def _lowerCAmelCase ( self : Union[str, Any] ) -> Tuple: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def _lowerCAmelCase ( self : Dict ) -> Any: """simple docstring""" _UpperCAmelCase : str = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCAmelCase__ , "do_normalize" ) ) self.assertTrue(hasattr(lowerCAmelCase__ , "do_convert_rgb" ) ) def _lowerCAmelCase ( self : int ) -> List[str]: """simple docstring""" _UpperCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , Image.Image ) # Test not batched input _UpperCAmelCase : str = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _UpperCAmelCase : Any = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _UpperCAmelCase : Tuple = image_processor( lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
17
1
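The expected_hidden_dim arithmetic in the Pix2Struct tests above is worth spelling out: every flattened patch carries patch_height * patch_width * num_channels pixel values, plus two extra slots for the patch's row and column indices.

patch_height, patch_width, num_channels = 16, 16, 3  # values from the tester above
expected_hidden_dim = patch_height * patch_width * num_channels + 2
print(expected_hidden_dim)  # 770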
'''simple docstring''' # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import platform import numpy as np import psutil import torch from accelerate import __version__ as version from accelerate.commands.config import default_config_file, load_config_from_file from ..utils import is_npu_available, is_xpu_available def __UpperCAmelCase ( a_: List[str]=None ): if subparsers is not None: _UpperCAmelCase : str = subparsers.add_parser("env" ) else: _UpperCAmelCase : Any = argparse.ArgumentParser("Accelerate env command" ) parser.add_argument( "--config_file", default=a_, help="The config file to use for the default values in the launching script." ) if subparsers is not None: parser.set_defaults(func=a_ ) return parser def __UpperCAmelCase ( a_: int ): _UpperCAmelCase : Optional[Any] = torch.__version__ _UpperCAmelCase : int = torch.cuda.is_available() _UpperCAmelCase : int = is_xpu_available() _UpperCAmelCase : List[Any] = is_npu_available() _UpperCAmelCase : Optional[int] = "Not found" # Get the default from the config file. if args.config_file is not None or os.path.isfile(a_ ): _UpperCAmelCase : str = load_config_from_file(args.config_file ).to_dict() _UpperCAmelCase : int = { "`Accelerate` version": version, "Platform": platform.platform(), "Python version": platform.python_version(), "Numpy version": np.__version__, "PyTorch version (GPU?)": f"""{pt_version} ({pt_cuda_available})""", "PyTorch XPU available": str(a_ ), "PyTorch NPU available": str(a_ ), "System RAM": f"""{psutil.virtual_memory().total / 1_024 ** 3:.2f} GB""", } if pt_cuda_available: _UpperCAmelCase : List[str] = torch.cuda.get_device_name() print("\nCopy-and-paste the text below in your GitHub issue\n" ) print("\n".join([f"""- {prop}: {val}""" for prop, val in info.items()] ) ) print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:" ) _UpperCAmelCase : Optional[int] = ( "\n".join([f"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] ) if isinstance(a_, a_ ) else f"""\t{accelerate_config}""" ) print(a_ ) _UpperCAmelCase : Optional[int] = accelerate_config return info def __UpperCAmelCase ( ): _UpperCAmelCase : Optional[Any] = env_command_parser() _UpperCAmelCase : int = parser.parse_args() env_command(a_ ) return 0 if __name__ == "__main__": raise SystemExit(main())
17
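The env command above is essentially "gather versions and hardware flags into a dict, then pretty-print it". A trimmed sketch of that report, covering only a subset of the real fields:

import platform

import numpy as np
import torch

info = {
    "Platform": platform.platform(),
    "Python version": platform.python_version(),
    "Numpy version": np.__version__,
    "PyTorch version (GPU?)": f"{torch.__version__} ({torch.cuda.is_available()})",
}
print("\n".join(f"- {prop}: {val}" for prop, val in info.items()))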
'''simple docstring''' from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging __a = logging.get_logger(__name__) __a = { 'huggingface/time-series-transformer-tourism-monthly': ( 'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json' ), # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer } class A__ ( UpperCamelCase ): """simple docstring""" UpperCamelCase_ : Tuple = '''time_series_transformer''' UpperCamelCase_ : Optional[Any] = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', '''num_hidden_layers''': '''encoder_layers''', } def __init__( self : Optional[int] , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : str = "student_t" , lowerCAmelCase__ : str = "nll" , lowerCAmelCase__ : int = 1 , lowerCAmelCase__ : List[int] = [1, 2, 3, 4, 5, 6, 7] , lowerCAmelCase__ : Optional[Union[str, bool]] = "mean" , lowerCAmelCase__ : int = 0 , lowerCAmelCase__ : int = 0 , lowerCAmelCase__ : int = 0 , lowerCAmelCase__ : int = 0 , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : int = 3_2 , lowerCAmelCase__ : int = 3_2 , lowerCAmelCase__ : int = 2 , lowerCAmelCase__ : int = 2 , lowerCAmelCase__ : int = 2 , lowerCAmelCase__ : int = 2 , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : str = "gelu" , lowerCAmelCase__ : int = 6_4 , lowerCAmelCase__ : float = 0.1 , lowerCAmelCase__ : float = 0.1 , lowerCAmelCase__ : float = 0.1 , lowerCAmelCase__ : float = 0.1 , lowerCAmelCase__ : float = 0.1 , lowerCAmelCase__ : int = 1_0_0 , lowerCAmelCase__ : float = 0.02 , lowerCAmelCase__ : Dict=True , **lowerCAmelCase__ : Tuple , ) -> Tuple: """simple docstring""" _UpperCAmelCase : Optional[int] = prediction_length _UpperCAmelCase : Optional[Any] = context_length or prediction_length _UpperCAmelCase : Optional[Any] = distribution_output _UpperCAmelCase : Union[str, Any] = loss _UpperCAmelCase : Dict = input_size _UpperCAmelCase : int = num_time_features _UpperCAmelCase : Any = lags_sequence _UpperCAmelCase : Dict = scaling _UpperCAmelCase : Tuple = num_dynamic_real_features _UpperCAmelCase : Dict = num_static_real_features _UpperCAmelCase : Union[str, Any] = num_static_categorical_features if cardinality and num_static_categorical_features > 0: if len(lowerCAmelCase__ ) != num_static_categorical_features: raise ValueError( "The cardinality should be a list of the same length as `num_static_categorical_features`" ) _UpperCAmelCase : Optional[int] = cardinality else: _UpperCAmelCase : Optional[Any] = [0] if embedding_dimension and num_static_categorical_features > 0: if len(lowerCAmelCase__ ) != num_static_categorical_features: raise ValueError( "The embedding dimension should be a list of the same length as `num_static_categorical_features`" ) _UpperCAmelCase : List[Any] = embedding_dimension else: _UpperCAmelCase : Optional[Any] = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality] _UpperCAmelCase : str = num_parallel_samples # Transformer architecture configuration _UpperCAmelCase : Union[str, Any] = input_size * len(lowerCAmelCase__ ) + self._number_of_features _UpperCAmelCase : str = d_model _UpperCAmelCase : Optional[Any] = encoder_attention_heads _UpperCAmelCase : Dict = decoder_attention_heads _UpperCAmelCase : List[Any] = encoder_ffn_dim _UpperCAmelCase : str = decoder_ffn_dim 
_UpperCAmelCase : Dict = encoder_layers _UpperCAmelCase : str = decoder_layers _UpperCAmelCase : Any = dropout _UpperCAmelCase : str = attention_dropout _UpperCAmelCase : List[Any] = activation_dropout _UpperCAmelCase : Dict = encoder_layerdrop _UpperCAmelCase : Any = decoder_layerdrop _UpperCAmelCase : Optional[Any] = activation_function _UpperCAmelCase : Tuple = init_std _UpperCAmelCase : List[str] = use_cache super().__init__(is_encoder_decoder=lowerCAmelCase__ , **lowerCAmelCase__ ) @property def _lowerCAmelCase ( self : str ) -> int: """simple docstring""" return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
17
1
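One non-obvious default in the config above: when embedding_dimension is not given, each static categorical feature gets min(50, (cardinality + 1) // 2) embedding dimensions. For hypothetical cardinalities:

cardinality = [366, 7]  # hypothetical: day-of-year and day-of-week features
embedding_dimension = [min(50, (cat + 1) // 2) for cat in cardinality]
print(embedding_dimension)  # [50, 4]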
'''simple docstring'''
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    # Newton-Laplace: c = sqrt(K / rho). The original function name was
    # obfuscated in the dump; a descriptive name is reconstructed here, and
    # the parameter names are recovered from the function body.
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
17
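The formula above is the Newton-Laplace equation:

$$c = \sqrt{\frac{K}{\rho}}$$

where K is the bulk modulus in pascals and ρ the density in kg/m³. As an approximate check, water with K ≈ 2.2 GPa and ρ = 1000 kg/m³ gives c ≈ sqrt(2.2e9 / 1000) ≈ 1483 m/s, close to the measured speed of sound in water.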
'''simple docstring'''
# Restored from the obfuscated dump by inverting its digit-to-"a" mapping
# against the standard library: baseaa -> base64, baaencode/baadecode ->
# b85encode/b85decode. Variable names come from their own use sites.
import base64


def base85_encode(string: str) -> bytes:
    return base64.b85encode(string.encode("utf-8"))


def base85_decode(encoded: bytes) -> str:
    return base64.b85decode(encoded).decode("utf-8")


if __name__ == "__main__":
    test = 'Hello World!'
    encoded = base85_encode(test)
    print(encoded)
    decoded = base85_decode(encoded)
    print(decoded)
17
1
'''simple docstring''' from abc import ABC, abstractmethod from typing import Optional, Union from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit from ..utils.typing import NestedDataStructureLike, PathLike class A__ ( UpperCamelCase ): """simple docstring""" def __init__( self : List[str] , lowerCAmelCase__ : Optional[NestedDataStructureLike[PathLike]] = None , lowerCAmelCase__ : Optional[NamedSplit] = None , lowerCAmelCase__ : Optional[Features] = None , lowerCAmelCase__ : str = None , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : Optional[int] = None , **lowerCAmelCase__ : str , ) -> List[Any]: """simple docstring""" _UpperCAmelCase : Tuple = path_or_paths _UpperCAmelCase : List[str] = split if split or isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else "train" _UpperCAmelCase : Dict = features _UpperCAmelCase : str = cache_dir _UpperCAmelCase : Tuple = keep_in_memory _UpperCAmelCase : Optional[Any] = streaming _UpperCAmelCase : Optional[int] = num_proc _UpperCAmelCase : Tuple = kwargs @abstractmethod def _lowerCAmelCase ( self : Union[str, Any] ) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]: """simple docstring""" pass class A__ ( UpperCamelCase ): """simple docstring""" def __init__( self : Dict , lowerCAmelCase__ : Optional[Features] = None , lowerCAmelCase__ : str = None , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : Optional[int] = None , **lowerCAmelCase__ : Optional[int] , ) -> str: """simple docstring""" _UpperCAmelCase : Optional[int] = features _UpperCAmelCase : Any = cache_dir _UpperCAmelCase : Optional[Any] = keep_in_memory _UpperCAmelCase : Union[str, Any] = streaming _UpperCAmelCase : Optional[Any] = num_proc _UpperCAmelCase : Optional[int] = kwargs @abstractmethod def _lowerCAmelCase ( self : Tuple ) -> Union[Dataset, IterableDataset]: """simple docstring""" pass
17
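The two abstract classes above define the reader contract: the constructor stores shared options, and subclasses implement the abstract read method. A self-contained sketch of that pattern (JsonLinesReader is hypothetical, not one of the real datasets readers):

from abc import ABC, abstractmethod

class BaseReader(ABC):
    def __init__(self, path, split="train"):
        # shared options live in the base class, as in the readers above
        self.path = path
        self.split = split

    @abstractmethod
    def read(self):
        ...

class JsonLinesReader(BaseReader):
    def read(self):
        import json

        with open(self.path) as f:
            return [json.loads(line) for line in f]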
'''simple docstring''' from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class A__ : """simple docstring""" UpperCamelCase_ : Any = XGLMConfig UpperCamelCase_ : Union[str, Any] = {} UpperCamelCase_ : Dict = '''gelu''' def __init__( self : Optional[int] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[Any]=1_4 , lowerCAmelCase__ : Any=7 , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : List[str]=True , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : List[str]=9_9 , lowerCAmelCase__ : Any=3_2 , lowerCAmelCase__ : Optional[int]=2 , lowerCAmelCase__ : List[Any]=4 , lowerCAmelCase__ : Any=3_7 , lowerCAmelCase__ : List[Any]="gelu" , lowerCAmelCase__ : List[Any]=0.1 , lowerCAmelCase__ : Dict=0.1 , lowerCAmelCase__ : Optional[int]=5_1_2 , lowerCAmelCase__ : Optional[Any]=0.02 , ) -> int: """simple docstring""" _UpperCAmelCase : Optional[Any] = parent _UpperCAmelCase : str = batch_size _UpperCAmelCase : str = seq_length _UpperCAmelCase : int = is_training _UpperCAmelCase : List[Any] = use_input_mask _UpperCAmelCase : Optional[int] = use_labels _UpperCAmelCase : str = vocab_size _UpperCAmelCase : int = d_model _UpperCAmelCase : Tuple = num_hidden_layers _UpperCAmelCase : Tuple = num_attention_heads _UpperCAmelCase : Tuple = ffn_dim _UpperCAmelCase : Any = activation_function _UpperCAmelCase : Union[str, Any] = activation_dropout _UpperCAmelCase : Union[str, Any] = attention_dropout _UpperCAmelCase : Any = max_position_embeddings _UpperCAmelCase : int = initializer_range _UpperCAmelCase : Any = None _UpperCAmelCase : int = 0 _UpperCAmelCase : Union[str, Any] = 2 _UpperCAmelCase : Tuple = 1 def _lowerCAmelCase ( self : Optional[Any] ) -> List[Any]: """simple docstring""" return XGLMConfig.from_pretrained("facebook/xglm-564M" ) def _lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" _UpperCAmelCase : int = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 ) _UpperCAmelCase : Any = None if self.use_input_mask: _UpperCAmelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase : Optional[Any] = self.get_config() _UpperCAmelCase : Dict = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, ) def _lowerCAmelCase ( self : int ) -> Any: """simple docstring""" return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=lowerCAmelCase__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=lowerCAmelCase__ , ) def _lowerCAmelCase ( self : 
Tuple ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) : List[Any] = config_and_inputs _UpperCAmelCase : Optional[int] = { "input_ids": input_ids, "head_mask": head_mask, } return config, inputs_dict @require_tf class A__ ( UpperCamelCase , UpperCamelCase , unittest.TestCase ): """simple docstring""" UpperCamelCase_ : str = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () UpperCamelCase_ : Any = (TFXGLMForCausalLM,) if is_tf_available() else () UpperCamelCase_ : Tuple = ( {'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {} ) UpperCamelCase_ : Dict = False UpperCamelCase_ : List[Any] = False UpperCamelCase_ : Tuple = False def _lowerCAmelCase ( self : List[str] ) -> int: """simple docstring""" _UpperCAmelCase : Dict = TFXGLMModelTester(self ) _UpperCAmelCase : Dict = ConfigTester(self , config_class=lowerCAmelCase__ , n_embd=3_7 ) def _lowerCAmelCase ( self : List[str] ) -> Dict: """simple docstring""" self.config_tester.run_common_tests() @slow def _lowerCAmelCase ( self : List[str] ) -> Union[str, Any]: """simple docstring""" for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase : Optional[int] = TFXGLMModel.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor." ) def _lowerCAmelCase ( self : Union[str, Any] ) -> int: """simple docstring""" super().test_resize_token_embeddings() @require_tf class A__ ( unittest.TestCase ): """simple docstring""" @slow def _lowerCAmelCase ( self : Optional[int] , lowerCAmelCase__ : Optional[Any]=True ) -> Tuple: """simple docstring""" _UpperCAmelCase : Optional[int] = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" ) _UpperCAmelCase : Any = tf.convert_to_tensor([[2, 2_6_8, 9_8_6_5]] , dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off _UpperCAmelCase : int = [2, 2_6_8, 9_8_6_5, 6_7, 1_1, 1_9_8_8, 5_7_2_5_2, 9_8_6_5, 5, 9_8_4, 6_7, 1_9_8_8, 2_1_3_8_3_8, 1_6_5_8, 5_3, 7_0_4_4_6, 3_3, 6_6_5_7, 2_7_8, 1_5_8_1] # fmt: on _UpperCAmelCase : Dict = model.generate(lowerCAmelCase__ , do_sample=lowerCAmelCase__ , num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , lowerCAmelCase__ ) @slow def _lowerCAmelCase ( self : List[Any] ) -> str: """simple docstring""" _UpperCAmelCase : List[str] = XGLMTokenizer.from_pretrained("facebook/xglm-564M" ) _UpperCAmelCase : Optional[Any] = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" ) tf.random.set_seed(0 ) _UpperCAmelCase : Any = tokenizer("Today is a nice day and" , return_tensors="tf" ) _UpperCAmelCase : int = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(":/CPU:0" ): _UpperCAmelCase : List[Any] = model.generate(lowerCAmelCase__ , do_sample=lowerCAmelCase__ , seed=[7, 0] ) _UpperCAmelCase : Any = tokenizer.decode(output_ids[0] , skip_special_tokens=lowerCAmelCase__ ) _UpperCAmelCase : List[Any] = ( "Today is a nice day and warm evening here over Southern Alberta!! 
Today when they closed schools due" ) self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ ) @slow def _lowerCAmelCase ( self : Optional[int] ) -> str: """simple docstring""" _UpperCAmelCase : Optional[Any] = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" ) _UpperCAmelCase : List[Any] = XGLMTokenizer.from_pretrained("facebook/xglm-564M" ) _UpperCAmelCase : Optional[int] = "left" # use different length sentences to test batching _UpperCAmelCase : Tuple = [ "This is an extremelly long sentence that only exists to test the ability of the model to cope with " "left-padding, such as in batched generation. The output for the sequence below should be the same " "regardless of whether left padding is applied or not. When", "Hello, my dog is a little", ] _UpperCAmelCase : Dict = tokenizer(lowerCAmelCase__ , return_tensors="tf" , padding=lowerCAmelCase__ ) _UpperCAmelCase : List[Any] = inputs["input_ids"] _UpperCAmelCase : Dict = model.generate(input_ids=lowerCAmelCase__ , attention_mask=inputs["attention_mask"] , max_new_tokens=1_2 ) _UpperCAmelCase : int = tokenizer(sentences[0] , return_tensors="tf" ).input_ids _UpperCAmelCase : Dict = model.generate(input_ids=lowerCAmelCase__ , max_new_tokens=1_2 ) _UpperCAmelCase : Optional[int] = tokenizer(sentences[1] , return_tensors="tf" ).input_ids _UpperCAmelCase : List[Any] = model.generate(input_ids=lowerCAmelCase__ , max_new_tokens=1_2 ) _UpperCAmelCase : List[str] = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ ) _UpperCAmelCase : Tuple = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCAmelCase__ ) _UpperCAmelCase : List[str] = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCAmelCase__ ) _UpperCAmelCase : Union[str, Any] = [ "This is an extremelly long sentence that only exists to test the ability of the model to cope with " "left-padding, such as in batched generation. The output for the sequence below should be the same " "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be " "a single", "Hello, my dog is a little bit of a shy one, but he is very friendly", ] self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , [non_padded_sentence, padded_sentence] )
17
1
'''simple docstring'''
from __future__ import annotations


def pigeon_sort(array: list[int]) -> list[int]:
    if len(array) == 0:
        return array
    _min, _max = min(array), max(array)
    # Compute the number of holes and set up the counters.
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range
    # Make the sorting: scatter each value into its hole and count repeats.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1
    # Make the array back by replacing the numbers in ascending hole order.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1
    # Return the sorted array.
    return array


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input('Enter numbers separated by comma:\n')
    unsorted = [int(x) for x in user_input.split(',')]
    print(pigeon_sort(unsorted))
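A couple of illustrative assertions for the pigeonhole sort above (added here; not part of the original row):

# Illustrative checks, assuming the cleaned-up pigeon_sort above.
assert pigeon_sort([3, 9, -11, 0, 7]) == [-11, 0, 3, 7, 9]  # handles negatives
assert pigeon_sort([]) == []  # the empty-list fast path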
17
'''simple docstring''' import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( "files", [ ["full:README.md", "dataset_infos.json"], ["empty:README.md", "dataset_infos.json"], ["dataset_infos.json"], ["full:README.md"], ], ) def __UpperCAmelCase ( a_: Tuple, a_: Any ): _UpperCAmelCase : Union[str, Any] = tmp_path_factory.mktemp("dset_infos_dir" ) if "full:README.md" in files: with open(dataset_infos_dir / "README.md", "w" ) as f: f.write("---\ndataset_info:\n dataset_size: 42\n---" ) if "empty:README.md" in files: with open(dataset_infos_dir / "README.md", "w" ) as f: f.write("" ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / "dataset_infos.json", "w" ) as f: f.write("{\"default\": {\"dataset_size\": 42}}" ) _UpperCAmelCase : List[str] = DatasetInfosDict.from_directory(a_ ) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( "dataset_info", [ DatasetInfo(), DatasetInfo( description="foo", features=Features({"a": Value("int32" )} ), builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train"}], download_size=42, ), ], ) def __UpperCAmelCase ( a_: Union[str, Any], a_: DatasetInfo ): _UpperCAmelCase : Tuple = str(a_ ) dataset_info.write_to_directory(a_ ) _UpperCAmelCase : Any = DatasetInfo.from_directory(a_ ) assert dataset_info == reloaded assert os.path.exists(os.path.join(a_, "dataset_info.json" ) ) def __UpperCAmelCase ( ): _UpperCAmelCase : Optional[int] = DatasetInfo( description="foo", citation="bar", homepage="https://foo.bar", license="CC0", features=Features({"a": Value("int32" )} ), post_processed={}, supervised_keys=(), task_templates=[], builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train", "num_examples": 42}], download_checksums={}, download_size=1_337, post_processing_size=442, dataset_size=1_234, size_in_bytes=1_337 + 442 + 1_234, ) _UpperCAmelCase : Tuple = dataset_info._to_yaml_dict() assert sorted(a_ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str) ) _UpperCAmelCase : List[Any] = yaml.safe_dump(a_ ) _UpperCAmelCase : Optional[int] = yaml.safe_load(a_ ) assert dataset_info_yaml_dict == reloaded def __UpperCAmelCase ( ): _UpperCAmelCase : str = DatasetInfo() _UpperCAmelCase : List[str] = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( "dataset_infos_dict", [ DatasetInfosDict(), DatasetInfosDict({"default": DatasetInfo()} ), DatasetInfosDict({"my_config_name": DatasetInfo()} ), DatasetInfosDict( { "default": DatasetInfo( description="foo", features=Features({"a": Value("int32" )} ), builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train"}], download_size=42, ) } ), DatasetInfosDict( { "v1": DatasetInfo(dataset_size=42 ), "v2": DatasetInfo(dataset_size=1_337 ), } ), ], ) def __UpperCAmelCase ( a_: str, a_: DatasetInfosDict ): _UpperCAmelCase : Union[str, Any] = str(a_ ) dataset_infos_dict.write_to_directory(a_ ) _UpperCAmelCase : Union[str, Any] = DatasetInfosDict.from_directory(a_ ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in dataset_infos_dict.items(): _UpperCAmelCase : Optional[int] = 
config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml _UpperCAmelCase : List[str] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(a_, "README.md" ) )
17
1
'''simple docstring''' from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker __a = 'CompVis/stable-diffusion-v1-1' __a = 'CompVis/stable-diffusion-v1-2' __a = 'CompVis/stable-diffusion-v1-3' __a = 'CompVis/stable-diffusion-v1-4' class A__ ( UpperCamelCase ): """simple docstring""" def __init__( self : str , lowerCAmelCase__ : AutoencoderKL , lowerCAmelCase__ : CLIPTextModel , lowerCAmelCase__ : CLIPTokenizer , lowerCAmelCase__ : UNetaDConditionModel , lowerCAmelCase__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCAmelCase__ : StableDiffusionSafetyChecker , lowerCAmelCase__ : CLIPImageProcessor , lowerCAmelCase__ : bool = True , ) -> Optional[int]: """simple docstring""" super()._init_() _UpperCAmelCase : List[Any] = StableDiffusionPipeline.from_pretrained(lowerCAmelCase__ ) _UpperCAmelCase : List[str] = StableDiffusionPipeline.from_pretrained(lowerCAmelCase__ ) _UpperCAmelCase : str = StableDiffusionPipeline.from_pretrained(lowerCAmelCase__ ) _UpperCAmelCase : Any = StableDiffusionPipeline( vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , requires_safety_checker=lowerCAmelCase__ , ) self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea ) @property def _lowerCAmelCase ( self : int ) -> Dict[str, Any]: """simple docstring""" return {k: getattr(self , lowerCAmelCase__ ) for k in self.config.keys() if not k.startswith("_" )} def _lowerCAmelCase ( self : Tuple , lowerCAmelCase__ : Optional[Union[str, int]] = "auto" ) -> Any: """simple docstring""" if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory _UpperCAmelCase : str = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(lowerCAmelCase__ ) def _lowerCAmelCase ( self : List[str] ) -> List[Any]: """simple docstring""" self.enable_attention_slicing(lowerCAmelCase__ ) @torch.no_grad() def _lowerCAmelCase ( self : List[str] , lowerCAmelCase__ : Union[str, List[str]] , lowerCAmelCase__ : int = 5_1_2 , lowerCAmelCase__ : int = 5_1_2 , lowerCAmelCase__ : int = 5_0 , lowerCAmelCase__ : float = 7.5 , lowerCAmelCase__ : Optional[Union[str, List[str]]] = None , lowerCAmelCase__ : Optional[int] = 1 , lowerCAmelCase__ : float = 0.0 , lowerCAmelCase__ : Optional[torch.Generator] = None , lowerCAmelCase__ : Optional[torch.FloatTensor] = None , lowerCAmelCase__ : Optional[str] = "pil" , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCAmelCase__ : int = 1 , **lowerCAmelCase__ : Union[str, Any] , ) -> Any: """simple docstring""" return self.pipea( prompt=lowerCAmelCase__ , height=lowerCAmelCase__ , width=lowerCAmelCase__ , num_inference_steps=lowerCAmelCase__ , guidance_scale=lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ , num_images_per_prompt=lowerCAmelCase__ , eta=lowerCAmelCase__ , generator=lowerCAmelCase__ , latents=lowerCAmelCase__ , 
output_type=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=lowerCAmelCase__ , **lowerCAmelCase__ , ) @torch.no_grad() def _lowerCAmelCase ( self : List[Any] , lowerCAmelCase__ : Union[str, List[str]] , lowerCAmelCase__ : int = 5_1_2 , lowerCAmelCase__ : int = 5_1_2 , lowerCAmelCase__ : int = 5_0 , lowerCAmelCase__ : float = 7.5 , lowerCAmelCase__ : Optional[Union[str, List[str]]] = None , lowerCAmelCase__ : Optional[int] = 1 , lowerCAmelCase__ : float = 0.0 , lowerCAmelCase__ : Optional[torch.Generator] = None , lowerCAmelCase__ : Optional[torch.FloatTensor] = None , lowerCAmelCase__ : Optional[str] = "pil" , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCAmelCase__ : int = 1 , **lowerCAmelCase__ : Any , ) -> Dict: """simple docstring""" return self.pipea( prompt=lowerCAmelCase__ , height=lowerCAmelCase__ , width=lowerCAmelCase__ , num_inference_steps=lowerCAmelCase__ , guidance_scale=lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ , num_images_per_prompt=lowerCAmelCase__ , eta=lowerCAmelCase__ , generator=lowerCAmelCase__ , latents=lowerCAmelCase__ , output_type=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=lowerCAmelCase__ , **lowerCAmelCase__ , ) @torch.no_grad() def _lowerCAmelCase ( self : str , lowerCAmelCase__ : Union[str, List[str]] , lowerCAmelCase__ : int = 5_1_2 , lowerCAmelCase__ : int = 5_1_2 , lowerCAmelCase__ : int = 5_0 , lowerCAmelCase__ : float = 7.5 , lowerCAmelCase__ : Optional[Union[str, List[str]]] = None , lowerCAmelCase__ : Optional[int] = 1 , lowerCAmelCase__ : float = 0.0 , lowerCAmelCase__ : Optional[torch.Generator] = None , lowerCAmelCase__ : Optional[torch.FloatTensor] = None , lowerCAmelCase__ : Optional[str] = "pil" , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCAmelCase__ : int = 1 , **lowerCAmelCase__ : int , ) -> Dict: """simple docstring""" return self.pipea( prompt=lowerCAmelCase__ , height=lowerCAmelCase__ , width=lowerCAmelCase__ , num_inference_steps=lowerCAmelCase__ , guidance_scale=lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ , num_images_per_prompt=lowerCAmelCase__ , eta=lowerCAmelCase__ , generator=lowerCAmelCase__ , latents=lowerCAmelCase__ , output_type=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=lowerCAmelCase__ , **lowerCAmelCase__ , ) @torch.no_grad() def _lowerCAmelCase ( self : int , lowerCAmelCase__ : Union[str, List[str]] , lowerCAmelCase__ : int = 5_1_2 , lowerCAmelCase__ : int = 5_1_2 , lowerCAmelCase__ : int = 5_0 , lowerCAmelCase__ : float = 7.5 , lowerCAmelCase__ : Optional[Union[str, List[str]]] = None , lowerCAmelCase__ : Optional[int] = 1 , lowerCAmelCase__ : float = 0.0 , lowerCAmelCase__ : Optional[torch.Generator] = None , lowerCAmelCase__ : Optional[torch.FloatTensor] = None , lowerCAmelCase__ : Optional[str] = "pil" , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCAmelCase__ : int = 1 , **lowerCAmelCase__ : int , ) -> Optional[Any]: """simple docstring""" return self.pipea( prompt=lowerCAmelCase__ , height=lowerCAmelCase__ , width=lowerCAmelCase__ , num_inference_steps=lowerCAmelCase__ , guidance_scale=lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ , num_images_per_prompt=lowerCAmelCase__ , eta=lowerCAmelCase__ , generator=lowerCAmelCase__ , 
latents=lowerCAmelCase__ , output_type=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=lowerCAmelCase__ , **lowerCAmelCase__ , ) @torch.no_grad() def _lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase__ : Union[str, List[str]] , lowerCAmelCase__ : int = 5_1_2 , lowerCAmelCase__ : int = 5_1_2 , lowerCAmelCase__ : int = 5_0 , lowerCAmelCase__ : float = 7.5 , lowerCAmelCase__ : Optional[Union[str, List[str]]] = None , lowerCAmelCase__ : Optional[int] = 1 , lowerCAmelCase__ : float = 0.0 , lowerCAmelCase__ : Optional[torch.Generator] = None , lowerCAmelCase__ : Optional[torch.FloatTensor] = None , lowerCAmelCase__ : Optional[str] = "pil" , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCAmelCase__ : int = 1 , **lowerCAmelCase__ : str , ) -> Tuple: """simple docstring""" _UpperCAmelCase : Optional[Any] = "cuda" if torch.cuda.is_available() else "cpu" self.to(lowerCAmelCase__ ) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" ) # Get first result from Stable Diffusion Checkpoint v1.1 _UpperCAmelCase : Tuple = self.textaimg_sda_a( prompt=lowerCAmelCase__ , height=lowerCAmelCase__ , width=lowerCAmelCase__ , num_inference_steps=lowerCAmelCase__ , guidance_scale=lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ , num_images_per_prompt=lowerCAmelCase__ , eta=lowerCAmelCase__ , generator=lowerCAmelCase__ , latents=lowerCAmelCase__ , output_type=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=lowerCAmelCase__ , **lowerCAmelCase__ , ) # Get first result from Stable Diffusion Checkpoint v1.2 _UpperCAmelCase : Dict = self.textaimg_sda_a( prompt=lowerCAmelCase__ , height=lowerCAmelCase__ , width=lowerCAmelCase__ , num_inference_steps=lowerCAmelCase__ , guidance_scale=lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ , num_images_per_prompt=lowerCAmelCase__ , eta=lowerCAmelCase__ , generator=lowerCAmelCase__ , latents=lowerCAmelCase__ , output_type=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=lowerCAmelCase__ , **lowerCAmelCase__ , ) # Get first result from Stable Diffusion Checkpoint v1.3 _UpperCAmelCase : Tuple = self.textaimg_sda_a( prompt=lowerCAmelCase__ , height=lowerCAmelCase__ , width=lowerCAmelCase__ , num_inference_steps=lowerCAmelCase__ , guidance_scale=lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ , num_images_per_prompt=lowerCAmelCase__ , eta=lowerCAmelCase__ , generator=lowerCAmelCase__ , latents=lowerCAmelCase__ , output_type=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=lowerCAmelCase__ , **lowerCAmelCase__ , ) # Get first result from Stable Diffusion Checkpoint v1.4 _UpperCAmelCase : Optional[Any] = self.textaimg_sda_a( prompt=lowerCAmelCase__ , height=lowerCAmelCase__ , width=lowerCAmelCase__ , num_inference_steps=lowerCAmelCase__ , guidance_scale=lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ , num_images_per_prompt=lowerCAmelCase__ , eta=lowerCAmelCase__ , generator=lowerCAmelCase__ , latents=lowerCAmelCase__ , output_type=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=lowerCAmelCase__ , **lowerCAmelCase__ , ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return 
StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
17
'''simple docstring'''
from math import factorial


def solution(num: int = 100) -> int:
    # Sum the digits of num! by mapping int over its decimal string.
    return sum(map(int, str(factorial(num))))


if __name__ == "__main__":
    print(solution(int(input('Enter the Number: ').strip())))
17
1
'''simple docstring'''
from math import factorial


def solution(num: int = 100) -> int:
    # Same digit sum as the variant above, written as a generator expression.
    return sum(int(x) for x in str(factorial(num)))


if __name__ == "__main__":
    print(solution(int(input('Enter the Number: ').strip())))
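A short worked check that applies to both digit-sum variants above (an illustrative addition, not from the source rows): 10! = 3628800, whose digits sum to 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.

# Illustrative checks, assuming either solution() defined above.
assert solution(10) == 27    # 10! = 3628800 -> 3+6+2+8+8+0+0 = 27
assert solution(100) == 648  # digit sum of 100!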
17
'''simple docstring'''
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # Insert in descending order so the list reads ascending from the head.
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
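An illustrative check of the merge above (added; not in the original row): the result stays sorted and keeps duplicates.

# Illustrative check, assuming the reconstructed classes above.
merged = merge_lists(SortedLinkedList([3, 1, 2]), SortedLinkedList([0, 2]))
assert list(merged) == [0, 1, 2, 2, 3]
assert str(merged) == "0 -> 1 -> 2 -> 2 -> 3"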
17
1
'''simple docstring'''
import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""
    if len(dirty) < 2:
        return dirty
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean) & 1:
        clean += "X"
    return clean


def generate_table(key: str) -> list[str]:
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]
    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]
    return plaintext
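A round-trip check for the reconstructed Playfair functions (illustrative; the key and message are arbitrary choices, not from the source): decoding an encoding returns the prepared input, with doubled letters split by X and an X pad for odd length.

# Illustrative round trip, assuming encode/decode/prepare_input above.
key = "playfair example"
message = "Hide the gold in the tree stump"  # contains no J, so it fits the 25-letter table
assert decode(encode(message, key), key) == prepare_input(message)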
17
'''simple docstring'''
def bin_to_octal(bin_string: str) -> str:
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    # Left-pad so the length is a multiple of 3 (one octal digit per 3 bits).
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string


if __name__ == "__main__":
    from doctest import testmod

    testmod()
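Illustrative cross-checks for the converter above against Python's built-in `oct` (added; not part of the original row):

# Illustrative checks, assuming bin_to_octal above.
assert bin_to_octal("111100") == "74"
assert bin_to_octal("101010") == oct(0b101010)[2:]  # both give "52"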
17
1
'''simple docstring''' import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __a = logging.get_logger(__name__) __a = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} __a = { 'tokenizer_file': { 'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json', }, } __a = { 'gpt-neox-20b': 2_048, } class A__ ( UpperCamelCase ): """simple docstring""" UpperCamelCase_ : Optional[int] = VOCAB_FILES_NAMES UpperCamelCase_ : str = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : List[str] = ['''input_ids''', '''attention_mask'''] def __init__( self : List[Any] , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : Any=None , lowerCAmelCase__ : Any=None , lowerCAmelCase__ : Tuple="<|endoftext|>" , lowerCAmelCase__ : str="<|endoftext|>" , lowerCAmelCase__ : Dict="<|endoftext|>" , lowerCAmelCase__ : Union[str, Any]=False , **lowerCAmelCase__ : Union[str, Any] , ) -> Any: """simple docstring""" super().__init__( lowerCAmelCase__ , lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , **lowerCAmelCase__ , ) _UpperCAmelCase : List[str] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , lowerCAmelCase__ ) != add_prefix_space: _UpperCAmelCase : Tuple = getattr(lowerCAmelCase__ , pre_tok_state.pop("type" ) ) _UpperCAmelCase : List[str] = add_prefix_space _UpperCAmelCase : Optional[Any] = pre_tok_class(**lowerCAmelCase__ ) _UpperCAmelCase : Optional[Any] = add_prefix_space def _lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]: """simple docstring""" _UpperCAmelCase : List[Any] = self._tokenizer.model.save(lowerCAmelCase__ , name=lowerCAmelCase__ ) return tuple(lowerCAmelCase__ ) def _lowerCAmelCase ( self : Any , lowerCAmelCase__ : "Conversation" ) -> List[int]: """simple docstring""" _UpperCAmelCase : List[Any] = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) + [self.eos_token_id] ) if len(lowerCAmelCase__ ) > self.model_max_length: _UpperCAmelCase : Any = input_ids[-self.model_max_length :] return input_ids
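The mangled class above corresponds to the fast GPT-NeoX tokenizer in transformers; a hedged usage sketch (fetching the 20B vocabulary requires network access):

# Hedged usage sketch; assumes the upstream GPTNeoXTokenizerFast class.
from transformers import GPTNeoXTokenizerFast

tok = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
ids = tok("Hello GPT-NeoX").input_ids
print(ids, tok.decode(ids))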
17
'''simple docstring'''
from datetime import datetime

import matplotlib.pyplot as plt
import torch


def freeze_module(module: torch.nn.Module) -> None:
    # Disable gradients for every parameter in the module.
    for param in module.parameters():
        param.requires_grad = False


def get_device() -> str:
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image) -> None:
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp() -> str:
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
17
1
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool


class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
17
'''simple docstring''' import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class A__ ( UpperCamelCase ): """simple docstring""" UpperCamelCase_ : Optional[int] = (EulerDiscreteScheduler,) UpperCamelCase_ : Tuple = 10 def _lowerCAmelCase ( self : Dict , **lowerCAmelCase__ : Tuple ) -> Any: """simple docstring""" _UpperCAmelCase : str = { "num_train_timesteps": 1_1_0_0, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**lowerCAmelCase__ ) return config def _lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=lowerCAmelCase__ ) def _lowerCAmelCase ( self : Any ) -> List[str]: """simple docstring""" for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=lowerCAmelCase__ , beta_end=lowerCAmelCase__ ) def _lowerCAmelCase ( self : List[str] ) -> List[str]: """simple docstring""" for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=lowerCAmelCase__ ) def _lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowerCAmelCase__ ) def _lowerCAmelCase ( self : List[Any] ) -> List[Any]: """simple docstring""" _UpperCAmelCase : List[str] = self.scheduler_classes[0] _UpperCAmelCase : int = self.get_scheduler_config() _UpperCAmelCase : Optional[int] = scheduler_class(**lowerCAmelCase__ ) scheduler.set_timesteps(self.num_inference_steps ) _UpperCAmelCase : int = torch.manual_seed(0 ) _UpperCAmelCase : Any = self.dummy_model() _UpperCAmelCase : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma _UpperCAmelCase : List[Any] = sample.to(lowerCAmelCase__ ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase : List[str] = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : int = model(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : int = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ ) _UpperCAmelCase : Optional[int] = output.prev_sample _UpperCAmelCase : Optional[Any] = torch.sum(torch.abs(lowerCAmelCase__ ) ) _UpperCAmelCase : Tuple = torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_sum.item() - 10.0807 ) < 1e-2 assert abs(result_mean.item() - 0.0131 ) < 1e-3 def _lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" _UpperCAmelCase : Any = self.scheduler_classes[0] _UpperCAmelCase : List[Any] = self.get_scheduler_config(prediction_type="v_prediction" ) _UpperCAmelCase : Any = scheduler_class(**lowerCAmelCase__ ) scheduler.set_timesteps(self.num_inference_steps ) _UpperCAmelCase : str = torch.manual_seed(0 ) _UpperCAmelCase : Optional[Any] = self.dummy_model() _UpperCAmelCase : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma _UpperCAmelCase : Tuple = sample.to(lowerCAmelCase__ ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase : Union[str, Any] = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : int = model(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : Union[str, Any] = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ ) _UpperCAmelCase : Union[str, Any] = output.prev_sample 
_UpperCAmelCase : Tuple = torch.sum(torch.abs(lowerCAmelCase__ ) ) _UpperCAmelCase : Any = torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_sum.item() - 0.0002 ) < 1e-2 assert abs(result_mean.item() - 2.26_76e-06 ) < 1e-3 def _lowerCAmelCase ( self : Tuple ) -> str: """simple docstring""" _UpperCAmelCase : Optional[int] = self.scheduler_classes[0] _UpperCAmelCase : List[Any] = self.get_scheduler_config() _UpperCAmelCase : int = scheduler_class(**lowerCAmelCase__ ) scheduler.set_timesteps(self.num_inference_steps , device=lowerCAmelCase__ ) _UpperCAmelCase : Optional[int] = torch.manual_seed(0 ) _UpperCAmelCase : str = self.dummy_model() _UpperCAmelCase : Any = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() _UpperCAmelCase : str = sample.to(lowerCAmelCase__ ) for t in scheduler.timesteps: _UpperCAmelCase : List[str] = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : Any = model(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : Tuple = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ ) _UpperCAmelCase : int = output.prev_sample _UpperCAmelCase : List[Any] = torch.sum(torch.abs(lowerCAmelCase__ ) ) _UpperCAmelCase : str = torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_sum.item() - 10.0807 ) < 1e-2 assert abs(result_mean.item() - 0.0131 ) < 1e-3 def _lowerCAmelCase ( self : List[str] ) -> int: """simple docstring""" _UpperCAmelCase : List[Any] = self.scheduler_classes[0] _UpperCAmelCase : int = self.get_scheduler_config() _UpperCAmelCase : Union[str, Any] = scheduler_class(**lowerCAmelCase__ , use_karras_sigmas=lowerCAmelCase__ ) scheduler.set_timesteps(self.num_inference_steps , device=lowerCAmelCase__ ) _UpperCAmelCase : Optional[int] = torch.manual_seed(0 ) _UpperCAmelCase : List[str] = self.dummy_model() _UpperCAmelCase : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() _UpperCAmelCase : Optional[int] = sample.to(lowerCAmelCase__ ) for t in scheduler.timesteps: _UpperCAmelCase : List[Any] = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : str = model(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : Optional[Any] = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ ) _UpperCAmelCase : List[Any] = output.prev_sample _UpperCAmelCase : List[Any] = torch.sum(torch.abs(lowerCAmelCase__ ) ) _UpperCAmelCase : Optional[Any] = torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_sum.item() - 124.52_2994_9951_1719 ) < 1e-2 assert abs(result_mean.item() - 0.1_6213_9326_3339_9963 ) < 1e-3
17
1
'''simple docstring''' import argparse import json import os import re import shutil import torch from transformers import BioGptConfig, BioGptForCausalLM from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() __a = 2 class A__ : """simple docstring""" def __init__( self : str , *, # begin keyword-only arguments lowerCAmelCase__ : Dict="<s>" , lowerCAmelCase__ : Optional[Any]="<pad>" , lowerCAmelCase__ : List[Any]="</s>" , lowerCAmelCase__ : Tuple="<unk>" , lowerCAmelCase__ : Tuple=None , ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[int] = bos, unk, pad, eos _UpperCAmelCase : Any = [] _UpperCAmelCase : Dict = [] _UpperCAmelCase : Union[str, Any] = {} _UpperCAmelCase : List[str] = self.add_symbol(lowerCAmelCase__ ) _UpperCAmelCase : Optional[int] = self.add_symbol(lowerCAmelCase__ ) _UpperCAmelCase : int = self.add_symbol(lowerCAmelCase__ ) _UpperCAmelCase : List[str] = self.add_symbol(lowerCAmelCase__ ) if extra_special_symbols: for s in extra_special_symbols: self.add_symbol(lowerCAmelCase__ ) _UpperCAmelCase : Tuple = len(self.symbols ) def __eq__( self : int , lowerCAmelCase__ : Optional[Any] ) -> Dict: """simple docstring""" return self.indices == other.indices def __getitem__( self : Dict , lowerCAmelCase__ : Union[str, Any] ) -> List[Any]: """simple docstring""" if idx < len(self.symbols ): return self.symbols[idx] return self.unk_word def __len__( self : Dict ) -> int: """simple docstring""" return len(self.symbols ) def __contains__( self : Tuple , lowerCAmelCase__ : Optional[Any] ) -> Tuple: """simple docstring""" return sym in self.indices @classmethod def _lowerCAmelCase ( cls : Any , lowerCAmelCase__ : Any ) -> Optional[int]: """simple docstring""" _UpperCAmelCase : Optional[int] = cls() d.add_from_file(lowerCAmelCase__ ) return d def _lowerCAmelCase ( self : Tuple , lowerCAmelCase__ : Dict , lowerCAmelCase__ : str=1 , lowerCAmelCase__ : List[str]=False ) -> Dict: """simple docstring""" if word in self.indices and not overwrite: _UpperCAmelCase : Dict = self.indices[word] _UpperCAmelCase : Optional[Any] = self.count[idx] + n return idx else: _UpperCAmelCase : Dict = len(self.symbols ) _UpperCAmelCase : Dict = idx self.symbols.append(lowerCAmelCase__ ) self.count.append(lowerCAmelCase__ ) return idx def _lowerCAmelCase ( self : str , lowerCAmelCase__ : int ) -> List[str]: """simple docstring""" return 0 def _lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase__ : Any ) -> Any: """simple docstring""" if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): try: with open(lowerCAmelCase__ , "r" , encoding="utf-8" ) as fd: self.add_from_file(lowerCAmelCase__ ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(lowerCAmelCase__ ) ) return _UpperCAmelCase : List[str] = f.readlines() _UpperCAmelCase : Any = self._load_meta(lowerCAmelCase__ ) for line in lines[indices_start_line:]: try: _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = line.rstrip().rsplit(" " , 1 ) if field == "#fairseq:overwrite": _UpperCAmelCase : Optional[Any] = True _UpperCAmelCase , _UpperCAmelCase : List[str] = line.rsplit(" " , 1 ) else: _UpperCAmelCase : Tuple = False _UpperCAmelCase : List[str] = int(lowerCAmelCase__ ) _UpperCAmelCase : int = line if 
word in self and not overwrite: raise RuntimeError( "Duplicate word found when loading Dictionary: '{}'. " "Duplicate words can overwrite earlier ones by adding the " "#fairseq:overwrite flag at the end of the corresponding row " "in the dictionary file. If using the Camembert model, please " "download an updated copy of the model file.".format(lowerCAmelCase__ ) ) self.add_symbol(lowerCAmelCase__ , n=lowerCAmelCase__ , overwrite=lowerCAmelCase__ ) except ValueError: raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'" ) def __UpperCAmelCase ( a_: List[Any] ): # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up, # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7} _UpperCAmelCase : Any = dict((re.sub(r"@@$", "", a_ ), v) if k.endswith("@@" ) else (re.sub(r"$", "</w>", a_ ), v) for k, v in d.items() ) _UpperCAmelCase : Union[str, Any] = "<s> <pad> </s> <unk>".split() # restore the special tokens for k in keep_keys: del da[f"""{k}</w>"""] _UpperCAmelCase : Tuple = d[k] # restore return da def __UpperCAmelCase ( a_: Any, a_: List[str] ): # prep if not os.path.exists(a_ ): raise ValueError(f"""path {biogpt_checkpoint_path} does not exist!""" ) os.makedirs(a_, exist_ok=a_ ) print(f"""Writing results to {pytorch_dump_folder_path}""" ) # handle various types of models _UpperCAmelCase : List[Any] = os.path.join(a_, "checkpoint.pt" ) if not os.path.isfile(a_ ): raise ValueError(f"""path to the file {checkpoint_file} does not exist!""" ) _UpperCAmelCase : Dict = torch.load(a_, map_location="cpu" ) _UpperCAmelCase : List[Any] = chkpt["cfg"]["model"] # dicts _UpperCAmelCase : Any = os.path.join(a_, "dict.txt" ) if not os.path.isfile(a_ ): raise ValueError(f"""path to the file {dict_file} does not exist!""" ) _UpperCAmelCase : Any = Dictionary.load(a_ ) _UpperCAmelCase : Dict = rewrite_dict_keys(src_dict.indices ) _UpperCAmelCase : List[str] = len(a_ ) _UpperCAmelCase : int = os.path.join(a_, VOCAB_FILES_NAMES["vocab_file"] ) print(f"""Generating {src_vocab_file} of {src_vocab_size} records""" ) with open(a_, "w", encoding="utf-8" ) as f: f.write(json.dumps(a_, ensure_ascii=a_, indent=a_ ) ) # merges_file (bpecodes) _UpperCAmelCase : int = os.path.join(a_, "bpecodes" ) if not os.path.isfile(a_ ): raise ValueError(f"""path to the file {bpecodes_file} does not exist!""" ) _UpperCAmelCase : int = os.path.join(a_, VOCAB_FILES_NAMES["merges_file"] ) shutil.copyfile(a_, a_ ) # model config _UpperCAmelCase : int = os.path.join(a_, "config.json" ) _UpperCAmelCase : Dict = { "activation_dropout": args["activation_dropout"], "architectures": ["BioGptForCausalLM"], "attention_probs_dropout_prob": args["attention_dropout"], "bos_token_id": 0, "eos_token_id": 2, "hidden_act": args["activation_fn"], "hidden_dropout_prob": args["dropout"], "hidden_size": args["decoder_embed_dim"], "initializer_range": 0.02, "intermediate_size": args["decoder_ffn_embed_dim"], "layer_norm_eps": 1e-1_2, "layerdrop": args["decoder_layerdrop"], "max_position_embeddings": args["max_target_positions"], "model_type": "biogpt", "num_attention_heads": args["decoder_attention_heads"], "num_hidden_layers": args["decoder_layers"], "pad_token_id": 1, "scale_embedding": not args["no_scale_embedding"], "tie_word_embeddings": args["share_decoder_input_output_embed"], "vocab_size": src_vocab_size, } # good hparam defaults to start with print(f"""Generating {biogpt_model_config_file}""" ) with open(a_, "w", encoding="utf-8" ) as f: f.write(json.dumps(a_, 
ensure_ascii=a_, indent=a_ ) ) # tokenizer config _UpperCAmelCase : Dict = os.path.join(a_, a_ ) _UpperCAmelCase : Optional[Any] = { "bos_token": "<s>", "eos_token": "</s>", "model_max_length": 1_024, "pad_token": "<pad>", "special_tokens_map_file": None, "tokenizer_class": "BioGptTokenizer", "unk_token": "<unk>", } print(f"""Generating {biogpt_tokenizer_config_file}""" ) with open(a_, "w", encoding="utf-8" ) as f: f.write(json.dumps(a_, ensure_ascii=a_, indent=a_ ) ) # model _UpperCAmelCase : Tuple = chkpt["model"] # remove unneeded keys _UpperCAmelCase : int = [ "decoder.version", ] for k in ignore_keys: model_state_dict.pop(a_, a_ ) _UpperCAmelCase : Any = list(model_state_dict.keys() ) for layer_name in layer_names: if layer_name.endswith("output_projection.weight" ): _UpperCAmelCase : int = model_state_dict.pop(a_ ) else: _UpperCAmelCase : str = model_state_dict.pop(a_ ) _UpperCAmelCase : Union[str, Any] = BioGptConfig.from_pretrained(a_ ) _UpperCAmelCase : Optional[int] = BioGptForCausalLM(a_ ) # check that it loads ok model_new.load_state_dict(a_ ) # save _UpperCAmelCase : Tuple = os.path.join(a_, a_ ) print(f"""Generating {pytorch_weights_dump_path}""" ) torch.save(a_, a_ ) print("Conversion is done!" ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( '--biogpt_checkpoint_path', default=None, type=str, required=True, help=( 'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,' ' bpecodes, etc.' ), ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) __a = parser.parse_args() convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
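A hedged sketch of calling the BioGPT converter defined above directly from Python; both paths are placeholders, not values from the source:

# Placeholder paths; the checkpoint dir must contain checkpoint.pt, dict.txt and bpecodes.
convert_biogpt_checkpoint_to_pytorch(
    "/path/to/biogpt_fairseq_checkpoint_dir",
    "/path/to/pytorch_dump_folder",
)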
17
'''simple docstring'''
def binary_and(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
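Illustrative checks that the reconstructed `binary_and` agrees with Python's `&` operator (added; not in the original row):

# Illustrative checks, assuming binary_and above.
assert binary_and(5, 3) == "0b001"                 # 101 & 011 -> 001
assert int(binary_and(25, 32), 2) == 25 & 32       # both are 0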
17
1
'''simple docstring''' from __future__ import annotations from collections.abc import Iterator from typing import Any class A__ : """simple docstring""" def __init__( self : Optional[int] , lowerCAmelCase__ : Any ) -> str: """simple docstring""" _UpperCAmelCase : Any = data _UpperCAmelCase : Node | None = None class A__ : """simple docstring""" def __init__( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase : Any = None _UpperCAmelCase : Dict = None def __iter__( self : Dict ) -> Iterator[Any]: """simple docstring""" _UpperCAmelCase : Dict = self.head while self.head: yield node.data _UpperCAmelCase : List[str] = node.next if node == self.head: break def __len__( self : List[Any] ) -> int: """simple docstring""" return sum(1 for _ in self ) def __repr__( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" return "->".join(str(lowerCAmelCase__ ) for item in iter(self ) ) def _lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase__ : Any ) -> None: """simple docstring""" self.insert_nth(len(self ) , lowerCAmelCase__ ) def _lowerCAmelCase ( self : List[str] , lowerCAmelCase__ : Any ) -> None: """simple docstring""" self.insert_nth(0 , lowerCAmelCase__ ) def _lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Any ) -> None: """simple docstring""" if index < 0 or index > len(self ): raise IndexError("list index out of range." ) _UpperCAmelCase : Optional[Any] = Node(lowerCAmelCase__ ) if self.head is None: _UpperCAmelCase : Optional[int] = new_node # first node points itself _UpperCAmelCase : int = new_node elif index == 0: # insert at head _UpperCAmelCase : Any = self.head _UpperCAmelCase : int = new_node else: _UpperCAmelCase : Any = self.head for _ in range(index - 1 ): _UpperCAmelCase : Tuple = temp.next _UpperCAmelCase : Optional[Any] = temp.next _UpperCAmelCase : Tuple = new_node if index == len(self ) - 1: # insert at tail _UpperCAmelCase : Optional[int] = new_node def _lowerCAmelCase ( self : List[Any] ) -> Any: """simple docstring""" return self.delete_nth(0 ) def _lowerCAmelCase ( self : str ) -> Any: """simple docstring""" return self.delete_nth(len(self ) - 1 ) def _lowerCAmelCase ( self : Tuple , lowerCAmelCase__ : int = 0 ) -> Any: """simple docstring""" if not 0 <= index < len(self ): raise IndexError("list index out of range." 
) _UpperCAmelCase : str = self.head if self.head == self.tail: # just one node _UpperCAmelCase : Any = None elif index == 0: # delete head node _UpperCAmelCase : Union[str, Any] = self.tail.next.next _UpperCAmelCase : int = self.head.next else: _UpperCAmelCase : Optional[int] = self.head for _ in range(index - 1 ): _UpperCAmelCase : List[Any] = temp.next _UpperCAmelCase : Any = temp.next _UpperCAmelCase : Any = temp.next.next if index == len(self ) - 1: # delete at tail _UpperCAmelCase : str = temp return delete_node.data def _lowerCAmelCase ( self : Tuple ) -> bool: """simple docstring""" return len(self ) == 0 def __UpperCAmelCase ( ): _UpperCAmelCase : int = CircularLinkedList() assert len(a_ ) == 0 assert circular_linked_list.is_empty() is True assert str(a_ ) == "" try: circular_linked_list.delete_front() raise AssertionError # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_tail() raise AssertionError # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_nth(-1 ) raise AssertionError except IndexError: assert True try: circular_linked_list.delete_nth(0 ) raise AssertionError except IndexError: assert True assert circular_linked_list.is_empty() is True for i in range(5 ): assert len(a_ ) == i circular_linked_list.insert_nth(a_, i + 1 ) assert str(a_ ) == "->".join(str(a_ ) for i in range(1, 6 ) ) circular_linked_list.insert_tail(6 ) assert str(a_ ) == "->".join(str(a_ ) for i in range(1, 7 ) ) circular_linked_list.insert_head(0 ) assert str(a_ ) == "->".join(str(a_ ) for i in range(0, 7 ) ) assert circular_linked_list.delete_front() == 0 assert circular_linked_list.delete_tail() == 6 assert str(a_ ) == "->".join(str(a_ ) for i in range(1, 6 ) ) assert circular_linked_list.delete_nth(2 ) == 3 circular_linked_list.insert_nth(2, 3 ) assert str(a_ ) == "->".join(str(a_ ) for i in range(1, 6 ) ) assert circular_linked_list.is_empty() is False if __name__ == "__main__": import doctest doctest.testmod()
17
'''simple docstring'''
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int) -> None:
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
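A hedged quick-run sketch for the Monte Carlo estimators above; the function names follow my reconstruction of the mangled identifiers, and the printed estimates vary between runs:

# Accuracy improves roughly with sqrt(iterations); these runs just print estimates.
pi_estimator(100_000)
area_under_line_estimator_check(100_000)
pi_estimator_using_area_under_curve(100_000)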
17
1
'''simple docstring'''
from pathlib import Path

import fire
from tqdm import tqdm


def download_wmt_dataset(src_lang: str = "ro", tgt_lang: str = "en", dataset: str = "wmt16", save_dir=None) -> None:
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)
    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")
        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split
        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")
        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")
    print(f"Saved {dataset} dataset to {save_dir}")


if __name__ == "__main__":
    fire.Fire(download_wmt_dataset)
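A hedged example call for the WMT downloader above (assumes network access and the `datasets`, `fire`, and `tqdm` packages; the save dir is a placeholder):

# Writes {train,val,test}.source and .target files under the save dir.
download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir="wmt16-ro-en")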
17
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)

_import_structure = {
    'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'],
    'processing_layoutlmv2': ['LayoutLMv2Processor'],
    'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_layoutlmv2_fast'] = ['LayoutLMv2TokenizerFast']

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_layoutlmv2'] = ['LayoutLMv2FeatureExtractor']
    _import_structure['image_processing_layoutlmv2'] = ['LayoutLMv2ImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_layoutlmv2'] = [
        'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST',
        'LayoutLMv2ForQuestionAnswering',
        'LayoutLMv2ForSequenceClassification',
        'LayoutLMv2ForTokenClassification',
        'LayoutLMv2Layer',
        'LayoutLMv2Model',
        'LayoutLMv2PreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
17
1
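The record above uses transformers' `_LazyModule` to defer heavy imports until first access. A minimal sketch of the same idea with a plain PEP 562 module-level `__getattr__`; this is an illustration, not the actual `_LazyModule` implementation.

import importlib

_import_structure = {"math": ["sqrt"]}  # symbol -> module mapping, as in the record above


def __getattr__(name):  # PEP 562: called only when `name` isn't found in the module
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            module = importlib.import_module(module_name)  # imported on first access only
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")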
'''simple docstring'''
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            num_train_timesteps=1000,
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()


@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A black colored car"
        source_prompt = "A blue colored car"

        generator = torch.manual_seed(0)

        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1

    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A black colored car"
        source_prompt = "A blue colored car"

        generator = torch.manual_seed(0)

        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
17
'''simple docstring'''


def fizz_buzz(number: int, iterations: int) -> str:
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
17
1
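A quick usage check for the FizzBuzz routine above; the output string is space-separated and runs inclusively up to `iterations`.

print(fizz_buzz(1, 15))
# -> "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz "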
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter

import requests


def get_job_links(workflow_run_id, token=None):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def get_artifacts_links(workflow_run_id, token=None):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def download_artifact(artifact_name, artifact_url, output_dir, token):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)


def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result


def get_all_errors(artifact_dir, job_links=None):
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))

    return errors


def reduce_by_error(logs, error_filter=None):
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def get_model(test):
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None

    return test


def reduce_by_model(logs, error_filter=None):
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)

    return "\n".join(lines)


def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)

    return "\n".join(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
17
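A toy illustration (with made-up log rows) of the two aggregations in the record above: `reduce_by_error` groups rows by error message, while `reduce_by_model` groups them by the model folder extracted from the failing test path.

logs = [
    ["line1", "AssertionError", "tests/models/bert/test_modeling_bert.py::test_a", None],
    ["line2", "AssertionError", "tests/models/bert/test_modeling_bert.py::test_b", None],
    ["line3", "ImportError", "tests/models/gpt2/test_modeling_gpt2.py::test_c", None],
]
print(reduce_by_error(logs)["AssertionError"]["count"])  # -> 2
print(list(reduce_by_model(logs)))  # -> ['bert', 'gpt2'] (sorted by error count)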
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union

import datasets
import numpy as np
import torch
from datasets import load_dataset

import transformers
from transformers import (
    AutoConfig,
    AutoModelForMultipleChoice,
    AutoTokenizer,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    default_data_collator,
    set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )


@dataclass
class DataTrainingArguments:
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."


@dataclass
class DataCollatorForMultipleChoice:
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).

    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).

    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer

    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }

    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
17
1
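A small standalone sketch of the flatten/un-flatten trick used by `preprocess_function` and the data collator in the record above: the 4 candidate endings per example are tokenized as independent sequences, then regrouped into blocks of 4.

flat = list(range(8))  # stand-in for tokenized sequences: 2 examples x 4 endings
grouped = [flat[i : i + 4] for i in range(0, len(flat), 4)]
print(grouped)  # -> [[0, 1, 2, 3], [4, 5, 6, 7]]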
'''simple docstring'''
import gc
import unittest

import numpy as np
import torch
from torch.backends.cuda import sdp_kernel

from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu

from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }

        return inputs

    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3


@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
17
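The two sampling modes exercised by the tests above, as a standalone sketch; the model id, scheduler settings, and call arguments are copied from those tests rather than invented.

from diffusers import CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNet2DModel

unet = UNet2DModel.from_pretrained("diffusers/consistency-models-test", subfolder="test_unet")
scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler).to("cpu")

# multistep sampling: explicit timestep schedule, num_inference_steps left unset
multistep = pipe(batch_size=1, num_inference_steps=None, timesteps=[22, 0], output_type="np").images
# onestep sampling: a single distilled step
onestep = pipe(batch_size=1, num_inference_steps=1, timesteps=None, output_type="np").images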
'''simple docstring'''
import argparse

import pytorch_lightning as pl
import torch
from torch import nn

from transformers import LongformerForQuestionAnswering, LongformerModel


class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    def forward(self):
        # implemented only because pytorch-lightning requires it
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
17
1
'''simple docstring'''
from __future__ import annotations

from itertools import permutations
from random import randint
from timeit import repeat


def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
    test_code1 = "\ntriplet_sum1(*dataset)\n"
    test_code2 = "\ntriplet_sum2(*dataset)\n"
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
17
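A deterministic sanity check for the two-pointer version above, which runs in O(n^2) against the O(n^3) permutation-based brute force:

print(triplet_sum2([13, 29, 7, 23, 5], 35))  # -> (5, 7, 23)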
'''simple docstring'''
from importlib import import_module

from .logging import get_logger


logger = get_logger(__name__)


class _PatchedModuleObj:
    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module


class patch_submodule:
    _active_patches = []

    def __init__(self, obj, target, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")

    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None

        return self.__exit__()
17
1
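A hedged usage sketch for `patch_submodule` above; the fake module and the replacement function are illustrative stand-ins, not part of the record.

import os
import types

fake_module = types.ModuleType("fake_module")  # stands in for a module whose global `os` we patch
fake_module.os = os


def mock_join(*paths):
    return "/mocked/" + "/".join(paths)


with patch_submodule(fake_module, "os.path.join", mock_join):
    print(fake_module.os.path.join("a", "b"))  # -> "/mocked/a/b"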
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple

import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs

from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger()


@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")


class FakeRegNetVisslWrapper(nn.Module):
    def __init__(self, model: nn.Module):
        super().__init__()

        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))

        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )


class NameToFromModelFuncMap(dict):
    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)

        return val


class NameToOurModelFuncMap(dict):
    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val


def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")

    return to_state_dict


def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], nn.Module],
    our_model_func: Callable[[], nn.Module],
    config: RegNetConfig,
    save_directory: Path,
    push_to_hub: bool = True,
):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
        our_model.load_state_dict(to_state_dict)

    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )

    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output

    # now since I don't want to use any config files, vissl seer model doesn't actually have a head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]

    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "regnet-x-002": ImageNetPreTrainedConfig(
            depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8, layer_type="x"
        ),
        "regnet-x-004": ImageNetPreTrainedConfig(
            depths=[1, 2, 7, 12], hidden_sizes=[32, 64, 160, 384], groups_width=16, layer_type="x"
        ),
        "regnet-x-006": ImageNetPreTrainedConfig(
            depths=[1, 3, 5, 7], hidden_sizes=[48, 96, 240, 528], groups_width=24, layer_type="x"
        ),
        "regnet-x-008": ImageNetPreTrainedConfig(
            depths=[1, 3, 7, 5], hidden_sizes=[64, 128, 288, 672], groups_width=16, layer_type="x"
        ),
        "regnet-x-016": ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 2], hidden_sizes=[72, 168, 408, 912], groups_width=24, layer_type="x"
        ),
        "regnet-x-032": ImageNetPreTrainedConfig(
            depths=[2, 6, 15, 2], hidden_sizes=[96, 192, 432, 1008], groups_width=48, layer_type="x"
        ),
        "regnet-x-040": ImageNetPreTrainedConfig(
            depths=[2, 5, 14, 2], hidden_sizes=[80, 240, 560, 1360], groups_width=40, layer_type="x"
        ),
        "regnet-x-064": ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 1], hidden_sizes=[168, 392, 784, 1624], groups_width=56, layer_type="x"
        ),
        "regnet-x-080": ImageNetPreTrainedConfig(
            depths=[2, 5, 15, 1], hidden_sizes=[80, 240, 720, 1920], groups_width=120, layer_type="x"
        ),
        "regnet-x-120": ImageNetPreTrainedConfig(
            depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112, layer_type="x"
        ),
        "regnet-x-160": ImageNetPreTrainedConfig(
            depths=[2, 6, 13, 1], hidden_sizes=[256, 512, 896, 2048], groups_width=128, layer_type="x"
        ),
        "regnet-x-320": ImageNetPreTrainedConfig(
            depths=[2, 7, 13, 1], hidden_sizes=[336, 672, 1344, 2520], groups_width=168, layer_type="x"
        ),
        # y variant
        "regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8),
        "regnet-y-004": ImageNetPreTrainedConfig(
            depths=[1, 3, 6, 6], hidden_sizes=[48, 104, 208, 440], groups_width=8
        ),
        "regnet-y-006": ImageNetPreTrainedConfig(
            depths=[1, 3, 7, 4], hidden_sizes=[48, 112, 256, 608], groups_width=16
        ),
        "regnet-y-008": ImageNetPreTrainedConfig(depths=[1, 3, 8, 2], hidden_sizes=[64, 128,
320, 768], groups_width=16 ), "regnet-y-016": ImageNetPreTrainedConfig( depths=[2, 6, 17, 2], hidden_sizes=[48, 120, 336, 888], groups_width=24 ), "regnet-y-032": ImageNetPreTrainedConfig( depths=[2, 5, 13, 1], hidden_sizes=[72, 216, 576, 1_512], groups_width=24 ), "regnet-y-040": ImageNetPreTrainedConfig( depths=[2, 6, 12, 2], hidden_sizes=[128, 192, 512, 1_088], groups_width=64 ), "regnet-y-064": ImageNetPreTrainedConfig( depths=[2, 7, 14, 2], hidden_sizes=[144, 288, 576, 1_296], groups_width=72 ), "regnet-y-080": ImageNetPreTrainedConfig( depths=[2, 4, 10, 1], hidden_sizes=[168, 448, 896, 2_016], groups_width=56 ), "regnet-y-120": ImageNetPreTrainedConfig( depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2_240], groups_width=112 ), "regnet-y-160": ImageNetPreTrainedConfig( depths=[2, 4, 11, 1], hidden_sizes=[224, 448, 1_232, 3_024], groups_width=112 ), "regnet-y-320": ImageNetPreTrainedConfig( depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1_392, 3_712], groups_width=232 ), # models created by SEER -> https://arxiv.org/abs/2202.08360 "regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1_392, 3_712], groups_width=232 ), "regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1_968, 4_920], groups_width=328 ), "regnet-y-1280-seer": RegNetConfig( depths=[2, 7, 17, 1], hidden_sizes=[528, 1_056, 2_904, 7_392], groups_width=264 ), "regnet-y-2560-seer": RegNetConfig( depths=[3, 7, 16, 1], hidden_sizes=[640, 1_696, 2_544, 5_088], groups_width=640 ), "regnet-y-10b-seer": ImageNetPreTrainedConfig( depths=[2, 7, 17, 1], hidden_sizes=[2_020, 4_040, 11_110, 28_280], groups_width=1_010 ), # finetuned on imagenet "regnet-y-320-seer-in1k": ImageNetPreTrainedConfig( depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1_392, 3_712], groups_width=232 ), "regnet-y-640-seer-in1k": ImageNetPreTrainedConfig( depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1_968, 4_920], groups_width=328 ), "regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig( depths=[2, 7, 17, 1], hidden_sizes=[528, 1_056, 2_904, 7_392], groups_width=264 ), "regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig( depths=[3, 7, 16, 1], hidden_sizes=[640, 1_696, 2_544, 5_088], groups_width=640 ), "regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig( depths=[2, 7, 17, 1], hidden_sizes=[2_020, 4_040, 11_110, 28_280], groups_width=1_010 ), } _UpperCAmelCase : Optional[int] = NameToOurModelFuncMap() _UpperCAmelCase : List[Any] = NameToFromModelFuncMap() # add seer weights logic def load_using_classy_vision(a_: str, a_: Callable[[], nn.Module] ) -> Tuple[nn.Module, Dict]: _UpperCAmelCase : Dict = torch.hub.load_state_dict_from_url(a_, model_dir=str(a_ ), map_location="cpu" ) _UpperCAmelCase : str = model_func() # check if we have a head, if yes add it _UpperCAmelCase : Tuple = files["classy_state_dict"]["base_model"]["model"] _UpperCAmelCase : List[Any] = model_state_dict["trunk"] model.load_state_dict(a_ ) return model.eval(), model_state_dict["heads"] # pretrained _UpperCAmelCase : Optional[Any] = partial( a_, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch", lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), ) _UpperCAmelCase : str = partial( a_, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch", lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), ) _UpperCAmelCase : int = partial( a_, 
"https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch", lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ), ) _UpperCAmelCase : Optional[Any] = partial( a_, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch", lambda: FakeRegNetVisslWrapper( RegNet(RegNetParams(depth=27, group_width=1_010, w_a=1_744, w_a=6_20.83, w_m=2.52 ) ) ), ) # IN1K finetuned _UpperCAmelCase : Optional[Any] = partial( a_, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch", lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), ) _UpperCAmelCase : Tuple = partial( a_, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch", lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), ) _UpperCAmelCase : Dict = partial( a_, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch", lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ), ) _UpperCAmelCase : Dict = partial( a_, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch", lambda: FakeRegNetVisslWrapper( RegNet(RegNetParams(depth=27, group_width=1_010, w_a=1_744, w_a=6_20.83, w_m=2.52 ) ) ), ) if model_name: convert_weight_and_push( a_, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], names_to_config[model_name], a_, a_, ) else: for model_name, config in names_to_config.items(): convert_weight_and_push( a_, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], a_, a_, a_, ) return config, expected_shape if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default=None, type=str, help=( 'The name of the model you wish to convert, it must be one of the supported regnet* architecture,' ' currently: regnetx-*, regnety-*. If `None`, all of them will the converted.' ), ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=Path, required=True, help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', default=True, type=bool, required=False, help='If True, push model and image processor to the hub.', ) __a = parser.parse_args() __a = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
'''simple docstring''' import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal __a = datasets.utils.logging.get_logger(__name__) __a = ['names', 'prefix'] __a = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols'] __a = ['encoding_errors', 'on_bad_lines'] __a = ['date_format'] @dataclass class A__ ( datasets.BuilderConfig ): """simple docstring""" UpperCamelCase_ : str = "," UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : Optional[Union[int, List[int], str]] = "infer" UpperCamelCase_ : Optional[List[str]] = None UpperCamelCase_ : Optional[List[str]] = None UpperCamelCase_ : Optional[Union[int, str, List[int], List[str]]] = None UpperCamelCase_ : Optional[Union[List[int], List[str]]] = None UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : bool = True UpperCamelCase_ : Optional[Literal["c", "python", "pyarrow"]] = None UpperCamelCase_ : Dict[Union[int, str], Callable[[Any], Any]] = None UpperCamelCase_ : Optional[list] = None UpperCamelCase_ : Optional[list] = None UpperCamelCase_ : bool = False UpperCamelCase_ : Optional[Union[int, List[int]]] = None UpperCamelCase_ : Optional[int] = None UpperCamelCase_ : Optional[Union[str, List[str]]] = None UpperCamelCase_ : bool = True UpperCamelCase_ : bool = True UpperCamelCase_ : bool = False UpperCamelCase_ : bool = True UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : str = "." UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : str = '"' UpperCamelCase_ : int = 0 UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : bool = True UpperCamelCase_ : bool = True UpperCamelCase_ : int = 0 UpperCamelCase_ : bool = True UpperCamelCase_ : bool = False UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : int = 1_00_00 UpperCamelCase_ : Optional[datasets.Features] = None UpperCamelCase_ : Optional[str] = "strict" UpperCamelCase_ : Literal["error", "warn", "skip"] = "error" UpperCamelCase_ : Optional[str] = None def _lowerCAmelCase ( self : str ) -> Tuple: """simple docstring""" if self.delimiter is not None: _UpperCAmelCase : Any = self.delimiter if self.column_names is not None: _UpperCAmelCase : List[Any] = self.column_names @property def _lowerCAmelCase ( self : Optional[int] ) -> Optional[int]: """simple docstring""" _UpperCAmelCase : Dict = { "sep": self.sep, "header": self.header, "names": self.names, "index_col": self.index_col, "usecols": self.usecols, "prefix": self.prefix, "mangle_dupe_cols": self.mangle_dupe_cols, "engine": self.engine, "converters": self.converters, "true_values": self.true_values, "false_values": self.false_values, "skipinitialspace": self.skipinitialspace, "skiprows": self.skiprows, "nrows": self.nrows, "na_values": self.na_values, "keep_default_na": self.keep_default_na, "na_filter": self.na_filter, "verbose": self.verbose, "skip_blank_lines": self.skip_blank_lines, "thousands": self.thousands, "decimal": self.decimal, "lineterminator": self.lineterminator, "quotechar": self.quotechar, "quoting": self.quoting, "escapechar": self.escapechar, "comment": self.comment, "encoding": self.encoding, "dialect": self.dialect, "error_bad_lines": self.error_bad_lines, "warn_bad_lines": self.warn_bad_lines, 
"skipfooter": self.skipfooter, "doublequote": self.doublequote, "memory_map": self.memory_map, "float_precision": self.float_precision, "chunksize": self.chunksize, "encoding_errors": self.encoding_errors, "on_bad_lines": self.on_bad_lines, "date_format": self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , lowerCAmelCase__ ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class A__ ( datasets.ArrowBasedBuilder ): """simple docstring""" UpperCamelCase_ : int = CsvConfig def _lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" return datasets.DatasetInfo(features=self.config.features ) def _lowerCAmelCase ( self : Tuple , lowerCAmelCase__ : str ) -> List[str]: """simple docstring""" if not self.config.data_files: raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) _UpperCAmelCase : List[str] = dl_manager.download_and_extract(self.config.data_files ) if isinstance(lowerCAmelCase__ , (str, list, tuple) ): _UpperCAmelCase : int = data_files if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCAmelCase : Any = [files] _UpperCAmelCase : List[Any] = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )] _UpperCAmelCase : Optional[Any] = [] for split_name, files in data_files.items(): if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCAmelCase : str = [files] _UpperCAmelCase : Any = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files] splits.append(datasets.SplitGenerator(name=lowerCAmelCase__ , gen_kwargs={"files": files} ) ) return splits def _lowerCAmelCase ( self : List[Any] , lowerCAmelCase__ : pa.Table ) -> pa.Table: """simple docstring""" if self.config.features is not None: _UpperCAmelCase : Tuple = self.config.features.arrow_schema if all(not require_storage_cast(lowerCAmelCase__ ) for feature in self.config.features.values() ): # cheaper cast _UpperCAmelCase : Any = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=lowerCAmelCase__ ) else: # more expensive cast; allows str <-> int/float or str to Audio for example _UpperCAmelCase : int = table_cast(lowerCAmelCase__ , lowerCAmelCase__ ) return pa_table def _lowerCAmelCase ( self : Dict , lowerCAmelCase__ : Dict ) -> Dict: """simple docstring""" _UpperCAmelCase : int = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str _UpperCAmelCase : Optional[Any] = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(lowerCAmelCase__ ) else object for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() ) } if schema is not None else None ) for file_idx, file in 
enumerate(itertools.chain.from_iterable(lowerCAmelCase__ ) ): _UpperCAmelCase : Optional[Any] = pd.read_csv(lowerCAmelCase__ , iterator=lowerCAmelCase__ , dtype=lowerCAmelCase__ , **self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(lowerCAmelCase__ ): _UpperCAmelCase : Optional[int] = pa.Table.from_pandas(lowerCAmelCase__ ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(lowerCAmelCase__ ) except ValueError as e: logger.error(F"""Failed to read file '{file}' with error {type(lowerCAmelCase__ )}: {e}""" ) raise
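# The builder above is what backs the packaged "csv" loader. A minimal usage
# sketch (the file path is hypothetical; keyword arguments such as `sep` are
# forwarded into CsvConfig and from there into pandas.read_csv):
#
#   from datasets import load_dataset
#   ds = load_dataset("csv", data_files={"train": "train.csv"}, sep=",")
#   print(ds["train"][0])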
'''simple docstring''' import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def __UpperCAmelCase ( a_: str ): return 1.0 / (1.0 + np.exp(-_outputs )) def __UpperCAmelCase ( a_: Tuple ): _UpperCAmelCase : List[Any] = np.max(_outputs, axis=-1, keepdims=a_ ) _UpperCAmelCase : Tuple = np.exp(_outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1, keepdims=a_ ) class A__ ( UpperCamelCase ): """simple docstring""" UpperCamelCase_ : str = '''sigmoid''' UpperCamelCase_ : Optional[Any] = '''softmax''' UpperCamelCase_ : int = '''none''' @add_end_docstrings( UpperCamelCase , R''' return_all_scores (`bool`, *optional*, defaults to `False`): Whether to return all prediction scores or just the one of the predicted class. function_to_apply (`str`, *optional*, defaults to `"default"`): The function to apply to the model outputs in order to retrieve the scores. Accepts four different values: - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model has several labels, will apply the softmax function on the output. - `"sigmoid"`: Applies the sigmoid function on the output. - `"softmax"`: Applies the softmax function on the output. - `"none"`: Does not apply any function on the output. ''' , ) class A__ ( UpperCamelCase ): """simple docstring""" UpperCamelCase_ : Dict = False UpperCamelCase_ : int = ClassificationFunction.NONE def __init__( self : Optional[Any] , **lowerCAmelCase__ : Optional[int] ) -> int: """simple docstring""" super().__init__(**lowerCAmelCase__ ) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == "tf" else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ) def _lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase__ : str=None , lowerCAmelCase__ : List[Any]=None , lowerCAmelCase__ : Union[str, Any]="" , **lowerCAmelCase__ : Any ) -> List[str]: """simple docstring""" _UpperCAmelCase : Any = tokenizer_kwargs _UpperCAmelCase : Any = {} if hasattr(self.model.config , "return_all_scores" ) and return_all_scores is None: _UpperCAmelCase : Any = self.model.config.return_all_scores if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or top_k is None: _UpperCAmelCase : int = top_k _UpperCAmelCase : Optional[Any] = False elif return_all_scores is not None: warnings.warn( "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of" " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`." , lowerCAmelCase__ , ) if return_all_scores: _UpperCAmelCase : Dict = None else: _UpperCAmelCase : Dict = 1 if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCAmelCase : List[Any] = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: _UpperCAmelCase : Dict = function_to_apply return preprocess_params, {}, postprocess_params def __call__( self : Union[str, Any] , *lowerCAmelCase__ : Dict , **lowerCAmelCase__ : Optional[int] ) -> str: """simple docstring""" _UpperCAmelCase : Any = super().__call__(*lowerCAmelCase__ , **lowerCAmelCase__ ) # TODO try and retrieve it in a nicer way from _sanitize_parameters. 
_UpperCAmelCase : Union[str, Any] = "top_k" not in kwargs if isinstance(args[0] , lowerCAmelCase__ ) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def _lowerCAmelCase ( self : str , lowerCAmelCase__ : str , **lowerCAmelCase__ : Optional[int] ) -> Dict[str, GenericTensor]: """simple docstring""" _UpperCAmelCase : Optional[Any] = self.framework if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): return self.tokenizer(**lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ ) elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and len(lowerCAmelCase__ ) == 1 and isinstance(inputs[0] , lowerCAmelCase__ ) and len(inputs[0] ) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ ) elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a" " dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair." ) return self.tokenizer(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ ) def _lowerCAmelCase ( self : Optional[int] , lowerCAmelCase__ : Tuple ) -> Optional[Any]: """simple docstring""" return self.model(**lowerCAmelCase__ ) def _lowerCAmelCase ( self : int , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : int=None , lowerCAmelCase__ : Dict=1 , lowerCAmelCase__ : Dict=True ) -> List[str]: """simple docstring""" if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: _UpperCAmelCase : Tuple = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: _UpperCAmelCase : Tuple = ClassificationFunction.SOFTMAX elif hasattr(self.model.config , "function_to_apply" ) and function_to_apply is None: _UpperCAmelCase : Dict = self.model.config.function_to_apply else: _UpperCAmelCase : Union[str, Any] = ClassificationFunction.NONE _UpperCAmelCase : Optional[int] = model_outputs["logits"][0] _UpperCAmelCase : Any = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: _UpperCAmelCase : List[Any] = sigmoid(lowerCAmelCase__ ) elif function_to_apply == ClassificationFunction.SOFTMAX: _UpperCAmelCase : str = softmax(lowerCAmelCase__ ) elif function_to_apply == ClassificationFunction.NONE: _UpperCAmelCase : Any = outputs else: raise ValueError(F"""Unrecognized `function_to_apply` argument: {function_to_apply}""" ) if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} _UpperCAmelCase : str = [ {"label": self.model.config.idalabel[i], "score": score.item()} for i, score in enumerate(lowerCAmelCase__ ) ] if not _legacy: dict_scores.sort(key=lambda lowerCAmelCase__ : x["score"] , reverse=lowerCAmelCase__ ) if top_k is not None: _UpperCAmelCase : Dict = dict_scores[:top_k] return dict_scores
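# Usage sketch for the pipeline above through the high-level factory (the
# checkpoint is whatever default the "text-classification" task resolves to):
#
#   from transformers import pipeline
#   classifier = pipeline("text-classification")
#   classifier("I love this movie!")              # -> [{"label": ..., "score": ...}]
#   classifier("I love this movie!", top_k=None)  # scores for every label, replacing
#                                                 # the deprecated return_all_scores=True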
'''simple docstring'''

from __future__ import annotations


def __UpperCAmelCase ( nums: list[int] ) -> int:
    # Maximum sum over elements of `nums` such that no two chosen elements are adjacent.
    if not nums:
        return 0
    max_including : int = nums[0]  # best sum whose last chosen element is the current one
    max_excluding : int = 0  # best sum that skips the current element
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
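# Worked example for the function above: for [3, 2, 7, 10] the valid
# non-adjacent selections include {3, 7} = 10, {2, 10} = 12 and {3, 10} = 13,
# so the maximum is 13:
#
#   >>> __UpperCAmelCase([3, 2, 7, 10])
#   13
#   >>> __UpperCAmelCase([])
#   0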
'''simple docstring'''

import numpy as np

# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models

if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential - Building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation='relu'))

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation='relu'))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation='relu'))
    classifier.add(layers.Dense(units=1, activation='sigmoid'))

    # Compiling the CNN
    classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

    # Part 2 - Fitting the CNN to the images
    # Load trained model weights
    # from keras.models import load_model
    # regressor = load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
        'dataset/training_set', target_size=(64, 64), batch_size=32, class_mode='binary'
    )
    test_set = test_datagen.flow_from_directory(
        'dataset/test_set', target_size=(64, 64), batch_size=32, class_mode='binary'
    )
    classifier.fit_generator(training_set, steps_per_epoch=5, epochs=30, validation_data=test_set)
    classifier.save('cnn.h5')

    # Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        'dataset/single_prediction/image.png', target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    # The single sigmoid unit yields a probability in [0, 1], so threshold at 0.5
    # instead of testing for exact equality with 0 or 1.
    if result[0][0] <= 0.5:
        prediction = 'Normal'
    else:
        prediction = 'Abnormality detected'
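# Note on the prediction step: `classifier.predict` on a single image returns an
# array of shape (1, 1) holding the sigmoid output, a float in [0, 1], which is
# why the result is compared against a 0.5 threshold above rather than tested
# for exact equality with 0 or 1 (an exact test would usually leave the
# prediction variable unset).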
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() __a = logging.get_logger(__name__) def __UpperCAmelCase ( a_: List[str] ): _UpperCAmelCase : Union[str, Any] = OrderedDict() for key, value in state_dict.items(): if key.startswith("module.encoder" ): _UpperCAmelCase : Optional[int] = key.replace("module.encoder", "glpn.encoder" ) if key.startswith("module.decoder" ): _UpperCAmelCase : List[Any] = key.replace("module.decoder", "decoder.stages" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 _UpperCAmelCase : int = key[key.find("patch_embed" ) + len("patch_embed" )] _UpperCAmelCase : Union[str, Any] = key.replace(f"""patch_embed{idx}""", f"""patch_embeddings.{int(a_ )-1}""" ) if "norm" in key: _UpperCAmelCase : Union[str, Any] = key.replace("norm", "layer_norm" ) if "glpn.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 _UpperCAmelCase : str = key[key.find("glpn.encoder.layer_norm" ) + len("glpn.encoder.layer_norm" )] _UpperCAmelCase : Optional[Any] = key.replace(f"""layer_norm{idx}""", f"""layer_norm.{int(a_ )-1}""" ) if "layer_norm1" in key: _UpperCAmelCase : Union[str, Any] = key.replace("layer_norm1", "layer_norm_1" ) if "layer_norm2" in key: _UpperCAmelCase : List[Any] = key.replace("layer_norm2", "layer_norm_2" ) if "block" in key: # replace for example block1 by block.0 _UpperCAmelCase : Optional[Any] = key[key.find("block" ) + len("block" )] _UpperCAmelCase : List[str] = key.replace(f"""block{idx}""", f"""block.{int(a_ )-1}""" ) if "attn.q" in key: _UpperCAmelCase : Optional[int] = key.replace("attn.q", "attention.self.query" ) if "attn.proj" in key: _UpperCAmelCase : List[str] = key.replace("attn.proj", "attention.output.dense" ) if "attn" in key: _UpperCAmelCase : Dict = key.replace("attn", "attention.self" ) if "fc1" in key: _UpperCAmelCase : List[Any] = key.replace("fc1", "dense1" ) if "fc2" in key: _UpperCAmelCase : List[Any] = key.replace("fc2", "dense2" ) if "linear_pred" in key: _UpperCAmelCase : Any = key.replace("linear_pred", "classifier" ) if "linear_fuse" in key: _UpperCAmelCase : Dict = key.replace("linear_fuse.conv", "linear_fuse" ) _UpperCAmelCase : List[str] = key.replace("linear_fuse.bn", "batch_norm" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 _UpperCAmelCase : List[Any] = key[key.find("linear_c" ) + len("linear_c" )] _UpperCAmelCase : Tuple = key.replace(f"""linear_c{idx}""", f"""linear_c.{int(a_ )-1}""" ) if "bot_conv" in key: _UpperCAmelCase : Union[str, Any] = key.replace("bot_conv", "0.convolution" ) if "skip_conv1" in key: _UpperCAmelCase : Optional[int] = key.replace("skip_conv1", "1.convolution" ) if "skip_conv2" in key: _UpperCAmelCase : Optional[int] = key.replace("skip_conv2", "2.convolution" ) if "fusion1" in key: _UpperCAmelCase : List[str] = key.replace("fusion1", "1.fusion" ) if "fusion2" in key: _UpperCAmelCase : List[str] = key.replace("fusion2", "2.fusion" ) if "fusion3" in key: _UpperCAmelCase : Optional[Any] = key.replace("fusion3", "3.fusion" ) if "fusion" in key and "conv" in key: _UpperCAmelCase : List[Any] = key.replace("conv", "convolutional_layer" ) if key.startswith("module.last_layer_depth" ): _UpperCAmelCase : Optional[int] = key.replace("module.last_layer_depth", "head.head" ) _UpperCAmelCase : int = 
value return new_state_dict def __UpperCAmelCase ( a_: str, a_: List[Any] ): # for each of the encoder blocks: for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) _UpperCAmelCase : Tuple = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""" ) _UpperCAmelCase : Union[str, Any] = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""" ) # next, add keys and values (in that order) to the state dict _UpperCAmelCase : Optional[int] = kv_weight[ : config.hidden_sizes[i], : ] _UpperCAmelCase : Dict = kv_bias[: config.hidden_sizes[i]] _UpperCAmelCase : Optional[int] = kv_weight[ config.hidden_sizes[i] :, : ] _UpperCAmelCase : Optional[Any] = kv_bias[config.hidden_sizes[i] :] def __UpperCAmelCase ( ): _UpperCAmelCase : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg" _UpperCAmelCase : List[Any] = Image.open(requests.get(a_, stream=a_ ).raw ) return image @torch.no_grad() def __UpperCAmelCase ( a_: Tuple, a_: Any, a_: Optional[Any]=False, a_: List[Any]=None ): _UpperCAmelCase : Optional[Any] = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3] ) # load image processor (only resize + rescale) _UpperCAmelCase : Dict = GLPNImageProcessor() # prepare image _UpperCAmelCase : List[Any] = prepare_img() _UpperCAmelCase : Optional[int] = image_processor(images=a_, return_tensors="pt" ).pixel_values logger.info("Converting model..." ) # load original state dict _UpperCAmelCase : Union[str, Any] = torch.load(a_, map_location=torch.device("cpu" ) ) # rename keys _UpperCAmelCase : List[str] = rename_keys(a_ ) # key and value matrices need special treatment read_in_k_v(a_, a_ ) # create HuggingFace model and load state dict _UpperCAmelCase : List[str] = GLPNForDepthEstimation(a_ ) model.load_state_dict(a_ ) model.eval() # forward pass _UpperCAmelCase : Dict = model(a_ ) _UpperCAmelCase : List[str] = outputs.predicted_depth # verify output if model_name is not None: if "nyu" in model_name: _UpperCAmelCase : Optional[Any] = torch.tensor( [[4.41_47, 4.08_73, 4.06_73], [3.78_90, 3.28_81, 3.15_25], [3.76_74, 3.54_23, 3.49_13]] ) elif "kitti" in model_name: _UpperCAmelCase : Tuple = torch.tensor( [[3.42_91, 2.78_65, 2.51_51], [3.28_41, 2.70_21, 2.35_02], [3.11_47, 2.46_25, 2.24_81]] ) else: raise ValueError(f"""Unknown model name: {model_name}""" ) _UpperCAmelCase : Dict = torch.Size([1, 480, 640] ) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3], a_, atol=1e-4 ) print("Looks ok!" ) # finally, push to hub if required if push_to_hub: logger.info("Pushing model and image processor to the hub..." ) model.push_to_hub( repo_path_or_name=Path(a_, a_ ), organization="nielsr", commit_message="Add model", use_temp_dir=a_, ) image_processor.push_to_hub( repo_path_or_name=Path(a_, a_ ), organization="nielsr", commit_message="Add image processor", use_temp_dir=a_, ) if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.' 
) parser.add_argument( '--model_name', default='glpn-kitti', type=str, help='Name of the model in case you\'re pushing to the hub.', ) __a = parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
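# Example invocation (a sketch; the checkpoint path is hypothetical and the
# script file name is an assumption based on `convert_glpn_checkpoint`):
#
#   python convert_glpn_to_pytorch.py \
#       --checkpoint_path ./glpn_kitti.pth \
#       --pytorch_dump_folder_path ./glpn-kitti \
#       --model_name glpn-kitti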
'''simple docstring'''

INSTALL_CONTENT = (
    '\n# Transformers installation\n! pip install transformers datasets\n'
    '# To install from source instead of the last release, comment the command above and uncomment the following one.\n'
    '# ! pip install git+https://github.com/huggingface/transformers.git\n'
)
__a = [{'type': 'code', 'content': INSTALL_CONTENT}]
__a = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
'''simple docstring''' import contextlib import csv import json import os import sqlitea import tarfile import textwrap import zipfile import pyarrow as pa import pyarrow.parquet as pq import pytest import datasets import datasets.config @pytest.fixture(scope="session" ) def __UpperCAmelCase ( ): _UpperCAmelCase : Optional[Any] = 10 _UpperCAmelCase : int = datasets.Features( { "tokens": datasets.Sequence(datasets.Value("string" ) ), "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"] ) ), "answers": datasets.Sequence( { "text": datasets.Value("string" ), "answer_start": datasets.Value("int32" ), } ), "id": datasets.Value("int64" ), } ) _UpperCAmelCase : List[str] = datasets.Dataset.from_dict( { "tokens": [["foo"] * 5] * n, "labels": [[1] * 5] * n, "answers": [{"answer_start": [97], "text": ["1976"]}] * 10, "id": list(range(a_ ) ), }, features=a_, ) return dataset @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Optional[int], a_: Dict ): _UpperCAmelCase : Any = str(tmp_path_factory.mktemp("data" ) / "file.arrow" ) dataset.map(cache_file_name=a_ ) return filename # FILE_CONTENT + files __a = '\\n Text data.\n Second line of data.' @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Dict ): _UpperCAmelCase : Dict = tmp_path_factory.mktemp("data" ) / "file.txt" _UpperCAmelCase : Tuple = FILE_CONTENT with open(a_, "w" ) as f: f.write(a_ ) return filename @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Union[str, Any] ): import bza _UpperCAmelCase : str = tmp_path_factory.mktemp("data" ) / "file.txt.bz2" _UpperCAmelCase : Optional[int] = bytes(a_, "utf-8" ) with bza.open(a_, "wb" ) as f: f.write(a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Union[str, Any] ): import gzip _UpperCAmelCase : str = str(tmp_path_factory.mktemp("data" ) / "file.txt.gz" ) _UpperCAmelCase : Any = bytes(a_, "utf-8" ) with gzip.open(a_, "wb" ) as f: f.write(a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: str ): if datasets.config.LZ4_AVAILABLE: import lza.frame _UpperCAmelCase : Optional[int] = tmp_path_factory.mktemp("data" ) / "file.txt.lz4" _UpperCAmelCase : str = bytes(a_, "utf-8" ) with lza.frame.open(a_, "wb" ) as f: f.write(a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: int, a_: Any ): if datasets.config.PY7ZR_AVAILABLE: import pyazr _UpperCAmelCase : Any = tmp_path_factory.mktemp("data" ) / "file.txt.7z" with pyazr.SevenZipFile(a_, "w" ) as archive: archive.write(a_, arcname=os.path.basename(a_ ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Any, a_: List[str] ): import tarfile _UpperCAmelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "file.txt.tar" with tarfile.TarFile(a_, "w" ) as f: f.add(a_, arcname=os.path.basename(a_ ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: int ): import lzma _UpperCAmelCase : List[Any] = tmp_path_factory.mktemp("data" ) / "file.txt.xz" _UpperCAmelCase : List[str] = bytes(a_, "utf-8" ) with lzma.open(a_, "wb" ) as f: f.write(a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Dict, a_: Tuple ): import zipfile _UpperCAmelCase : Tuple = tmp_path_factory.mktemp("data" ) / "file.txt.zip" with zipfile.ZipFile(a_, "w" ) as f: f.write(a_, arcname=os.path.basename(a_ ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Optional[int] ): if datasets.config.ZSTANDARD_AVAILABLE: import zstandard as zstd _UpperCAmelCase : 
Optional[int] = tmp_path_factory.mktemp("data" ) / "file.txt.zst" _UpperCAmelCase : int = bytes(a_, "utf-8" ) with zstd.open(a_, "wb" ) as f: f.write(a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Optional[int] ): _UpperCAmelCase : List[str] = tmp_path_factory.mktemp("data" ) / "file.xml" _UpperCAmelCase : Tuple = textwrap.dedent( "\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>" ) with open(a_, "w" ) as f: f.write(a_ ) return filename __a = [ {'col_1': '0', 'col_2': 0, 'col_3': 0.0}, {'col_1': '1', 'col_2': 1, 'col_3': 1.0}, {'col_1': '2', 'col_2': 2, 'col_3': 2.0}, {'col_1': '3', 'col_2': 3, 'col_3': 3.0}, ] __a = [ {'col_1': '4', 'col_2': 4, 'col_3': 4.0}, {'col_1': '5', 'col_2': 5, 'col_3': 5.0}, ] __a = { 'col_1': ['0', '1', '2', '3'], 'col_2': [0, 1, 2, 3], 'col_3': [0.0, 1.0, 2.0, 3.0], } __a = [ {'col_3': 0.0, 'col_1': '0', 'col_2': 0}, {'col_3': 1.0, 'col_1': '1', 'col_2': 1}, ] __a = [ {'col_1': 's0', 'col_2': 0, 'col_3': 0.0}, {'col_1': 's1', 'col_2': 1, 'col_3': 1.0}, {'col_1': 's2', 'col_2': 2, 'col_3': 2.0}, {'col_1': 's3', 'col_2': 3, 'col_3': 3.0}, ] @pytest.fixture(scope="session" ) def __UpperCAmelCase ( ): return DATA_DICT_OF_LISTS @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Union[str, Any] ): _UpperCAmelCase : str = datasets.Dataset.from_dict(a_ ) _UpperCAmelCase : Optional[int] = str(tmp_path_factory.mktemp("data" ) / "dataset.arrow" ) dataset.map(cache_file_name=a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: str ): _UpperCAmelCase : int = str(tmp_path_factory.mktemp("data" ) / "dataset.sqlite" ) with contextlib.closing(sqlitea.connect(a_ ) ) as con: _UpperCAmelCase : List[Any] = con.cursor() cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)" ) for item in DATA: cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values() ) ) con.commit() return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Any ): _UpperCAmelCase : Dict = str(tmp_path_factory.mktemp("data" ) / "dataset.csv" ) with open(a_, "w", newline="" ) as f: _UpperCAmelCase : Dict = csv.DictWriter(a_, fieldnames=["col_1", "col_2", "col_3"] ) writer.writeheader() for item in DATA: writer.writerow(a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Union[str, Any] ): _UpperCAmelCase : Union[str, Any] = str(tmp_path_factory.mktemp("data" ) / "dataset2.csv" ) with open(a_, "w", newline="" ) as f: _UpperCAmelCase : Optional[int] = csv.DictWriter(a_, fieldnames=["col_1", "col_2", "col_3"] ) writer.writeheader() for item in DATA: writer.writerow(a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: str, a_: str ): import bza _UpperCAmelCase : str = tmp_path_factory.mktemp("data" ) / "dataset.csv.bz2" with open(a_, "rb" ) as f: _UpperCAmelCase : Any = 
f.read() # data = bytes(FILE_CONTENT, "utf-8") with bza.open(a_, "wb" ) as f: f.write(a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Optional[int], a_: Dict, a_: Optional[int] ): _UpperCAmelCase : List[Any] = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip" with zipfile.ZipFile(a_, "w" ) as f: f.write(a_, arcname=os.path.basename(a_ ) ) f.write(a_, arcname=os.path.basename(a_ ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: List[str], a_: Union[str, Any], a_: int ): _UpperCAmelCase : int = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip" with zipfile.ZipFile(a_, "w" ) as f: f.write(a_, arcname=os.path.basename(csv_path.replace(".csv", ".CSV" ) ) ) f.write(a_, arcname=os.path.basename(csva_path.replace(".csv", ".CSV" ) ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Any, a_: Union[str, Any], a_: Tuple ): _UpperCAmelCase : Any = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.csv.zip" with zipfile.ZipFile(a_, "w" ) as f: f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) ) ) f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Tuple ): _UpperCAmelCase : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.parquet" ) _UpperCAmelCase : Dict = pa.schema( { "col_1": pa.string(), "col_2": pa.intaa(), "col_3": pa.floataa(), } ) with open(a_, "wb" ) as f: _UpperCAmelCase : Tuple = pq.ParquetWriter(a_, schema=a_ ) _UpperCAmelCase : Tuple = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(a_ ) )] for k in DATA[0]}, schema=a_ ) writer.write_table(a_ ) writer.close() return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Any ): _UpperCAmelCase : Union[str, Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.json" ) _UpperCAmelCase : str = {"data": DATA} with open(a_, "w" ) as f: json.dump(a_, a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Union[str, Any] ): _UpperCAmelCase : Optional[int] = str(tmp_path_factory.mktemp("data" ) / "dataset.json" ) _UpperCAmelCase : Dict = {"data": DATA_DICT_OF_LISTS} with open(a_, "w" ) as f: json.dump(a_, a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: int ): _UpperCAmelCase : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl" ) with open(a_, "w" ) as f: for item in DATA: f.write(json.dumps(a_ ) + "\n" ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Tuple ): _UpperCAmelCase : Any = str(tmp_path_factory.mktemp("data" ) / "dataset2.jsonl" ) with open(a_, "w" ) as f: for item in DATA: f.write(json.dumps(a_ ) + "\n" ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Any ): _UpperCAmelCase : int = str(tmp_path_factory.mktemp("data" ) / "dataset_312.jsonl" ) with open(a_, "w" ) as f: for item in DATA_312: f.write(json.dumps(a_ ) + "\n" ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Optional[Any] ): _UpperCAmelCase : Optional[int] = str(tmp_path_factory.mktemp("data" ) / "dataset-str.jsonl" ) with open(a_, "w" ) as f: for item in DATA_STR: f.write(json.dumps(a_ ) + "\n" ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Union[str, Any], a_: Any ): import gzip _UpperCAmelCase : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.txt.gz" ) with open(a_, "rb" ) as orig_file: with gzip.open(a_, "wb" ) as zipped_file: zipped_file.writelines(a_ ) 
return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Optional[Any], a_: Tuple ): import gzip _UpperCAmelCase : List[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl.gz" ) with open(a_, "rb" ) as orig_file: with gzip.open(a_, "wb" ) as zipped_file: zipped_file.writelines(a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Dict, a_: List[Any], a_: Union[str, Any] ): _UpperCAmelCase : Tuple = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.zip" with zipfile.ZipFile(a_, "w" ) as f: f.write(a_, arcname=os.path.basename(a_ ) ) f.write(a_, arcname=os.path.basename(a_ ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Union[str, Any], a_: Optional[int], a_: Optional[Any], a_: Dict ): _UpperCAmelCase : Dict = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.zip" with zipfile.ZipFile(a_, "w" ) as f: f.write(a_, arcname=os.path.join("nested", os.path.basename(a_ ) ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: List[Any], a_: Optional[int], a_: List[str] ): _UpperCAmelCase : Dict = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.jsonl.zip" with zipfile.ZipFile(a_, "w" ) as f: f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) ) ) f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: List[Any], a_: List[Any], a_: str ): _UpperCAmelCase : Optional[Any] = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.tar" with tarfile.TarFile(a_, "w" ) as f: f.add(a_, arcname=os.path.basename(a_ ) ) f.add(a_, arcname=os.path.basename(a_ ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: List[str], a_: List[Any], a_: Tuple, a_: Dict ): _UpperCAmelCase : List[Any] = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.tar" with tarfile.TarFile(a_, "w" ) as f: f.add(a_, arcname=os.path.join("nested", os.path.basename(a_ ) ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: List[str] ): _UpperCAmelCase : List[str] = ["0", "1", "2", "3"] _UpperCAmelCase : Tuple = str(tmp_path_factory.mktemp("data" ) / "dataset.txt" ) with open(a_, "w" ) as f: for item in data: f.write(item + "\n" ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Union[str, Any] ): _UpperCAmelCase : Dict = ["0", "1", "2", "3"] _UpperCAmelCase : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset2.txt" ) with open(a_, "w" ) as f: for item in data: f.write(item + "\n" ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Any ): _UpperCAmelCase : int = ["0", "1", "2", "3"] _UpperCAmelCase : str = tmp_path_factory.mktemp("data" ) / "dataset.abc" with open(a_, "w" ) as f: for item in data: f.write(item + "\n" ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Optional[Any], a_: Any, a_: Union[str, Any] ): _UpperCAmelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "dataset.text.zip" with zipfile.ZipFile(a_, "w" ) as f: f.write(a_, arcname=os.path.basename(a_ ) ) f.write(a_, arcname=os.path.basename(a_ ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Optional[int], a_: List[Any], a_: List[Any] ): _UpperCAmelCase : List[Any] = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.text.zip" with zipfile.ZipFile(a_, "w" ) as f: f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) ) ) f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) 
) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Any, a_: str, a_: Tuple ): _UpperCAmelCase : List[Any] = tmp_path_factory.mktemp("data" ) / "dataset.ext.zip" with zipfile.ZipFile(a_, "w" ) as f: f.write(a_, arcname=os.path.basename("unsupported.ext" ) ) f.write(a_, arcname=os.path.basename("unsupported_2.ext" ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Optional[Any] ): _UpperCAmelCase : List[str] = "\n".join(["First", "Second\u2029with Unicode new line", "Third"] ) _UpperCAmelCase : str = str(tmp_path_factory.mktemp("data" ) / "dataset_with_unicode_new_lines.txt" ) with open(a_, "w", encoding="utf-8" ) as f: f.write(a_ ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( ): return os.path.join("tests", "features", "data", "test_image_rgb.jpg" ) @pytest.fixture(scope="session" ) def __UpperCAmelCase ( ): return os.path.join("tests", "features", "data", "test_audio_44100.wav" ) @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: int, a_: Optional[Any] ): _UpperCAmelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "dataset.img.zip" with zipfile.ZipFile(a_, "w" ) as f: f.write(a_, arcname=os.path.basename(a_ ) ) f.write(a_, arcname=os.path.basename(a_ ).replace(".jpg", "2.jpg" ) ) return path @pytest.fixture(scope="session" ) def __UpperCAmelCase ( a_: Tuple ): _UpperCAmelCase : Optional[Any] = tmp_path_factory.mktemp("data_dir" ) (data_dir / "subdir").mkdir() with open(data_dir / "subdir" / "train.txt", "w" ) as f: f.write("foo\n" * 10 ) with open(data_dir / "subdir" / "test.txt", "w" ) as f: f.write("bar\n" * 10 ) # hidden file with open(data_dir / "subdir" / ".test.txt", "w" ) as f: f.write("bar\n" * 10 ) # hidden directory (data_dir / ".subdir").mkdir() with open(data_dir / ".subdir" / "train.txt", "w" ) as f: f.write("foo\n" * 10 ) with open(data_dir / ".subdir" / "test.txt", "w" ) as f: f.write("bar\n" * 10 ) return data_dir
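# Usage sketch: because these fixtures are session-scoped, any test collected in
# the same pytest session can request one by its fixture name. With the original
# (pre-obfuscation) fixture names this would look like (hypothetical test):
#
#   def test_csv_has_rows(csv_path):
#       import pandas as pd
#       assert len(pd.read_csv(csv_path)) == 4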
'''simple docstring''' import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging __a = '\\n\n' __a = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n' __a = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A__ ( datasets.Metric ): """simple docstring""" def _lowerCAmelCase ( self : List[Any] ) -> Optional[int]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "input_texts": datasets.Value("string" ), } ) , reference_urls=["https://huggingface.co/docs/transformers/perplexity"] , ) def _lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : int = 1_6 , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Tuple=None ) -> Optional[int]: """simple docstring""" if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." 
if device == "gpu": _UpperCAmelCase : Any = "cuda" else: _UpperCAmelCase : Optional[int] = "cuda" if torch.cuda.is_available() else "cpu" _UpperCAmelCase : List[str] = AutoModelForCausalLM.from_pretrained(lowerCAmelCase__ ) _UpperCAmelCase : List[str] = model.to(lowerCAmelCase__ ) _UpperCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(lowerCAmelCase__ ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: _UpperCAmelCase : List[str] = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(lowerCAmelCase__ ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" _UpperCAmelCase : int = model.config.max_length - 1 else: _UpperCAmelCase : str = model.config.max_length _UpperCAmelCase : List[str] = tokenizer( lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , return_tensors="pt" , return_attention_mask=lowerCAmelCase__ , ).to(lowerCAmelCase__ ) _UpperCAmelCase : Union[str, Any] = encodings["input_ids"] _UpperCAmelCase : Optional[int] = encodings["attention_mask"] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." 
_UpperCAmelCase : Optional[int] = [] _UpperCAmelCase : str = CrossEntropyLoss(reduction="none" ) for start_index in logging.tqdm(range(0 , len(lowerCAmelCase__ ) , lowerCAmelCase__ ) ): _UpperCAmelCase : Union[str, Any] = min(start_index + batch_size , len(lowerCAmelCase__ ) ) _UpperCAmelCase : List[str] = encoded_texts[start_index:end_index] _UpperCAmelCase : Optional[int] = attn_masks[start_index:end_index] if add_start_token: _UpperCAmelCase : Optional[int] = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(lowerCAmelCase__ ) _UpperCAmelCase : Any = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 ) _UpperCAmelCase : List[str] = torch.cat( [torch.ones(bos_tokens_tensor.size() , dtype=torch.int64 ).to(lowerCAmelCase__ ), attn_mask] , dim=1 ) _UpperCAmelCase : int = encoded_batch with torch.no_grad(): _UpperCAmelCase : List[str] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ ).logits _UpperCAmelCase : Tuple = out_logits[..., :-1, :].contiguous() _UpperCAmelCase : List[str] = labels[..., 1:].contiguous() _UpperCAmelCase : str = attn_mask[..., 1:].contiguous() _UpperCAmelCase : List[str] = torch.exp2( (loss_fct(shift_logits.transpose(1 , 2 ) , lowerCAmelCase__ ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(lowerCAmelCase__ )}
17
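The loop above reduces to one formula: each sequence's perplexity is the exponentiated (base 2, via exp2) average negative log-likelihood over its unmasked tokens. A minimal sketch of just that reduction, with dummy tensors standing in for real model outputs:

import torch
from torch.nn import CrossEntropyLoss

logits = torch.randn(1, 4, 10)         # dummy model output: (batch, seq_len, vocab)
labels = torch.randint(0, 10, (1, 4))  # dummy target token ids
attn_mask = torch.ones(1, 4)           # 1 = real token, 0 = padding

loss_fct = CrossEntropyLoss(reduction="none")
nll = loss_fct(logits.transpose(1, 2), labels)                  # per-token NLL, shape (1, 4)
ppl = torch.exp2((nll * attn_mask).sum(1) / attn_mask.sum(1))   # masked average, then exponentiate
print(ppl)                             # one perplexity value per sequence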
'''simple docstring''' import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class A__ ( UpperCamelCase , unittest.TestCase ): """simple docstring""" UpperCamelCase_ : str = BarthezTokenizer UpperCamelCase_ : List[Any] = BarthezTokenizerFast UpperCamelCase_ : Optional[int] = True UpperCamelCase_ : Optional[int] = True def _lowerCAmelCase ( self : Optional[int] ) -> Dict: """simple docstring""" super().setUp() _UpperCAmelCase : Tuple = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname , legacy_format=lowerCAmelCase__ ) _UpperCAmelCase : List[str] = tokenizer def _lowerCAmelCase ( self : List[str] ) -> Optional[int]: """simple docstring""" _UpperCAmelCase : Tuple = "<pad>" _UpperCAmelCase : Dict = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ ) def _lowerCAmelCase ( self : Tuple ) -> Optional[int]: """simple docstring""" _UpperCAmelCase : List[str] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-1] , "<mask>" ) self.assertEqual(len(lowerCAmelCase__ ) , 1_0_1_1_2_2 ) def _lowerCAmelCase ( self : Union[str, Any] ) -> Dict: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_1_2_2 ) @require_torch def _lowerCAmelCase ( self : Any ) -> int: """simple docstring""" _UpperCAmelCase : int = ["A long paragraph for summarization.", "Another paragraph for summarization."] _UpperCAmelCase : Optional[int] = [0, 5_7, 3_0_1_8, 7_0_3_0_7, 9_1, 2] _UpperCAmelCase : int = self.tokenizer( lowerCAmelCase__ , max_length=len(lowerCAmelCase__ ) , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors="pt" ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) self.assertEqual((2, 6) , batch.input_ids.shape ) self.assertEqual((2, 6) , batch.attention_mask.shape ) _UpperCAmelCase : str = batch.input_ids.tolist()[0] self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) def _lowerCAmelCase ( self : str ) -> Optional[Any]: """simple docstring""" if not self.test_rust_tokenizer: return _UpperCAmelCase : Optional[int] = self.get_tokenizer() _UpperCAmelCase : Optional[int] = self.get_rust_tokenizer() _UpperCAmelCase : Tuple = "I was born in 92000, and this is falsé." 
_UpperCAmelCase : Dict = tokenizer.tokenize(lowerCAmelCase__ ) _UpperCAmelCase : Union[str, Any] = rust_tokenizer.tokenize(lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : Dict = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) _UpperCAmelCase : Union[str, Any] = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : Optional[Any] = self.get_rust_tokenizer() _UpperCAmelCase : Optional[Any] = tokenizer.encode(lowerCAmelCase__ ) _UpperCAmelCase : Optional[int] = rust_tokenizer.encode(lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) @slow def _lowerCAmelCase ( self : int ) -> int: """simple docstring""" _UpperCAmelCase : Optional[Any] = {"input_ids": [[0, 4_9_0, 1_4_3_2_8, 4_5_0_7, 3_5_4, 4_7, 4_3_6_6_9, 9_5, 2_5, 7_8_1_1_7, 2_0_2_1_5, 1_9_7_7_9, 1_9_0, 2_2, 4_0_0, 4, 3_5_3_4_3, 8_0_3_1_0, 6_0_3, 8_6, 2_4_9_3_7, 1_0_5, 3_3_4_3_8, 9_4_7_6_2, 1_9_6, 3_9_6_4_2, 7, 1_5, 1_5_9_3_3, 1_7_3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0_5_3_4, 8_7, 2_5, 6_6, 3_3_5_8, 1_9_6, 5_5_2_8_9, 8, 8_2_9_6_1, 8_1, 2_2_0_4, 7_5_2_0_3, 7, 1_5, 7_6_3, 1_2_9_5_6, 2_1_6, 1_7_8, 1_4_3_2_8, 9_5_9_5, 1_3_7_7, 6_9_6_9_3, 7, 4_4_8, 7_1_0_2_1, 1_9_6, 1_8_1_0_6, 1_4_3_7, 1_3_9_7_4, 1_0_8, 9_0_8_3, 4, 4_9_3_1_5, 7, 3_9, 8_6, 1_3_2_6, 2_7_9_3, 4_6_3_3_3, 4, 4_4_8, 1_9_6, 7_4_5_8_8, 7, 4_9_3_1_5, 7, 3_9, 2_1, 8_2_2, 3_8_4_7_0, 7_4, 2_1, 6_6_7_2_3, 6_2_4_8_0, 8, 2_2_0_5_0, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. _UpperCAmelCase : Tuple = [ "Le transformeur est un modèle d'apprentissage profond introduit en 2017, " "utilisé principalement dans le domaine du traitement automatique des langues (TAL).", "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus " "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches " "telles que la traduction et la synthèse de texte.", ] self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase__ , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=lowerCAmelCase__ , )
17
1
'''simple docstring''' import unittest import torch from torch import nn from diffusers.models.activations import get_activation class A__ ( unittest.TestCase ): """simple docstring""" def test_swish( self ) -> None: """simple docstring""" act = get_activation("swish" ) self.assertIsInstance(act , nn.SiLU ) self.assertEqual(act(torch.tensor(-1_0_0 , dtype=torch.float32 ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 ) self.assertEqual(act(torch.tensor(2_0 , dtype=torch.float32 ) ).item() , 2_0 ) def test_silu( self ) -> None: """simple docstring""" act = get_activation("silu" ) self.assertIsInstance(act , nn.SiLU ) self.assertEqual(act(torch.tensor(-1_0_0 , dtype=torch.float32 ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 ) self.assertEqual(act(torch.tensor(2_0 , dtype=torch.float32 ) ).item() , 2_0 ) def test_mish( self ) -> None: """simple docstring""" act = get_activation("mish" ) self.assertIsInstance(act , nn.Mish ) self.assertEqual(act(torch.tensor(-2_0_0 , dtype=torch.float32 ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 ) self.assertEqual(act(torch.tensor(2_0 , dtype=torch.float32 ) ).item() , 2_0 ) def test_gelu( self ) -> None: """simple docstring""" act = get_activation("gelu" ) self.assertIsInstance(act , nn.GELU ) self.assertEqual(act(torch.tensor(-1_0_0 , dtype=torch.float32 ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 ) self.assertEqual(act(torch.tensor(2_0 , dtype=torch.float32 ) ).item() , 2_0 )
17
'''simple docstring''' import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: __a = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class A__ ( unittest.TestCase ): """simple docstring""" def __init__( self : int , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[Any]=7 , lowerCAmelCase__ : int=3 , lowerCAmelCase__ : List[Any]=1_8 , lowerCAmelCase__ : str=3_0 , lowerCAmelCase__ : str=4_0_0 , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : str=True , lowerCAmelCase__ : List[Any]=None , ) -> List[Any]: """simple docstring""" _UpperCAmelCase : List[Any] = size if size is not None else {"height": 2_0, "width": 2_0} _UpperCAmelCase : Optional[Any] = parent _UpperCAmelCase : Tuple = batch_size _UpperCAmelCase : str = num_channels _UpperCAmelCase : Optional[Any] = image_size _UpperCAmelCase : Dict = min_resolution _UpperCAmelCase : str = max_resolution _UpperCAmelCase : List[Any] = size _UpperCAmelCase : Union[str, Any] = do_normalize _UpperCAmelCase : Optional[Any] = do_convert_rgb _UpperCAmelCase : str = [5_1_2, 1_0_2_4, 2_0_4_8, 4_0_9_6] _UpperCAmelCase : str = patch_size if patch_size is not None else {"height": 1_6, "width": 1_6} def _lowerCAmelCase ( self : List[str] ) -> int: """simple docstring""" return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def _lowerCAmelCase ( self : Any ) -> str: """simple docstring""" _UpperCAmelCase : Dict = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg" _UpperCAmelCase : Optional[Any] = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw ).convert("RGB" ) return raw_image @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , ) @require_torch @require_vision class A__ ( UpperCamelCase , unittest.TestCase ): """simple docstring""" UpperCamelCase_ : Any = PixaStructImageProcessor if is_vision_available() else None def _lowerCAmelCase ( self : Any ) -> Optional[int]: """simple docstring""" _UpperCAmelCase : Tuple = PixaStructImageProcessingTester(self ) @property def _lowerCAmelCase ( self : Tuple ) -> int: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def _lowerCAmelCase ( self : Any ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase : str = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCAmelCase__ , "do_normalize" ) ) self.assertTrue(hasattr(lowerCAmelCase__ , "do_convert_rgb" ) ) def _lowerCAmelCase ( self : Optional[Any] ) -> Dict: """simple docstring""" _UpperCAmelCase : Optional[Any] = self.image_processor_tester.prepare_dummy_image() _UpperCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict ) _UpperCAmelCase : str = 2_0_4_8 _UpperCAmelCase : Any = image_processor(lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__ ) self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) ) def _lowerCAmelCase ( self : Dict 
) -> int: """simple docstring""" _UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , Image.Image ) # Test not batched input _UpperCAmelCase : List[str] = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _UpperCAmelCase : Union[str, Any] = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _UpperCAmelCase : str = image_processor( lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def _lowerCAmelCase ( self : Optional[int] ) -> List[str]: """simple docstring""" _UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , Image.Image ) # Test not batched input _UpperCAmelCase : Union[str, Any] = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 _UpperCAmelCase : str = True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(lowerCAmelCase__ ): _UpperCAmelCase : str = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches _UpperCAmelCase : Any = "Hello" _UpperCAmelCase : Optional[int] = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__ , header_text=lowerCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _UpperCAmelCase : List[Any] = image_processor( lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__ , header_text=lowerCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def _lowerCAmelCase ( self : List[str] ) -> List[Any]: """simple docstring""" _UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , np.ndarray ) _UpperCAmelCase : Any = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _UpperCAmelCase : int = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _UpperCAmelCase : Union[str, Any] = 
image_processor( lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def _lowerCAmelCase ( self : int ) -> str: """simple docstring""" _UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , torch.Tensor ) # Test not batched input _UpperCAmelCase : List[str] = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _UpperCAmelCase : Union[str, Any] = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _UpperCAmelCase : str = image_processor( lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , ) @require_torch @require_vision class A__ ( UpperCamelCase , unittest.TestCase ): """simple docstring""" UpperCamelCase_ : List[Any] = PixaStructImageProcessor if is_vision_available() else None def _lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase : Any = PixaStructImageProcessingTester(self , num_channels=4 ) _UpperCAmelCase : List[Any] = 3 @property def _lowerCAmelCase ( self : Union[str, Any] ) -> Tuple: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def _lowerCAmelCase ( self : Dict ) -> Any: """simple docstring""" _UpperCAmelCase : str = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCAmelCase__ , "do_normalize" ) ) self.assertTrue(hasattr(lowerCAmelCase__ , "do_convert_rgb" ) ) def _lowerCAmelCase ( self : int ) -> List[str]: """simple docstring""" _UpperCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , Image.Image ) # Test not batched input _UpperCAmelCase : str = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _UpperCAmelCase : Any = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _UpperCAmelCase : Tuple = image_processor( lowerCAmelCase__ , return_tensors="pt" , max_patches=lowerCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
17
1
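The expected_hidden_dim that recurs in the tests above is simple arithmetic: each flattened Pix2Struct patch carries patch_height * patch_width * num_channels pixel values plus two positional entries (the patch's row and column index). A worked check for the default tester configuration:

patch_height, patch_width, num_channels = 16, 16, 3
expected_hidden_dim = patch_height * patch_width * num_channels + 2  # +2 for the row/column ids
assert expected_hidden_dim == 770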
'''simple docstring''' from __future__ import annotations class A__ : """simple docstring""" def __init__( self : Tuple , lowerCAmelCase__ : list[list[int]] ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase : Dict = TypeError( "Matrices must be formed from a list of zero or more lists containing at " "least one and the same number of values, each of which must be of type " "int or float." ) if len(lowerCAmelCase__ ) != 0: _UpperCAmelCase : List[Any] = len(rows[0] ) if cols == 0: raise error for row in rows: if len(lowerCAmelCase__ ) != cols: raise error for value in row: if not isinstance(lowerCAmelCase__ , (int, float) ): raise error _UpperCAmelCase : int = rows else: _UpperCAmelCase : List[str] = [] def _lowerCAmelCase ( self : Tuple ) -> list[list[int]]: """simple docstring""" return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )] @property def _lowerCAmelCase ( self : Optional[Any] ) -> int: """simple docstring""" return len(self.rows ) @property def _lowerCAmelCase ( self : Any ) -> int: """simple docstring""" return len(self.rows[0] ) @property def _lowerCAmelCase ( self : Optional[int] ) -> tuple[int, int]: """simple docstring""" return (self.num_rows, self.num_columns) @property def _lowerCAmelCase ( self : Optional[int] ) -> bool: """simple docstring""" return self.order[0] == self.order[1] def _lowerCAmelCase ( self : Union[str, Any] ) -> Matrix: """simple docstring""" _UpperCAmelCase : Any = [ [0 if column_num != row_num else 1 for column_num in range(self.num_rows )] for row_num in range(self.num_rows ) ] return Matrix(lowerCAmelCase__ ) def _lowerCAmelCase ( self : Any ) -> int: """simple docstring""" if not self.is_square: return 0 if self.order == (0, 0): return 1 if self.order == (1, 1): return int(self.rows[0][0] ) if self.order == (2, 2): return int( (self.rows[0][0] * self.rows[1][1]) - (self.rows[0][1] * self.rows[1][0]) ) else: return sum( self.rows[0][column] * self.cofactors().rows[0][column] for column in range(self.num_columns ) ) def _lowerCAmelCase ( self : Tuple ) -> bool: """simple docstring""" return bool(self.determinant() ) def _lowerCAmelCase ( self : List[str] , lowerCAmelCase__ : int , lowerCAmelCase__ : int ) -> int: """simple docstring""" _UpperCAmelCase : Dict = [ [ self.rows[other_row][other_column] for other_column in range(self.num_columns ) if other_column != column ] for other_row in range(self.num_rows ) if other_row != row ] return Matrix(lowerCAmelCase__ ).determinant() def _lowerCAmelCase ( self : Dict , lowerCAmelCase__ : int , lowerCAmelCase__ : int ) -> int: """simple docstring""" if (row + column) % 2 == 0: return self.get_minor(lowerCAmelCase__ , lowerCAmelCase__ ) return -1 * self.get_minor(lowerCAmelCase__ , lowerCAmelCase__ ) def _lowerCAmelCase ( self : Any ) -> Matrix: """simple docstring""" return Matrix( [ [self.get_minor(lowerCAmelCase__ , lowerCAmelCase__ ) for column in range(self.num_columns )] for row in range(self.num_rows ) ] ) def _lowerCAmelCase ( self : Union[str, Any] ) -> Matrix: """simple docstring""" return Matrix( [ [ self.minors().rows[row][column] if (row + column) % 2 == 0 else self.minors().rows[row][column] * -1 for column in range(self.minors().num_columns ) ] for row in range(self.minors().num_rows ) ] ) def _lowerCAmelCase ( self : str ) -> Matrix: """simple docstring""" _UpperCAmelCase : Union[str, Any] = [ [self.cofactors().rows[column][row] for column in range(self.num_columns )] for row in range(self.num_rows ) ] return Matrix(lowerCAmelCase__ ) def _lowerCAmelCase ( self : 
Optional[int] ) -> Matrix: """simple docstring""" _UpperCAmelCase : Union[str, Any] = self.determinant() if not determinant: raise TypeError("Only matrices with a non-zero determinant have an inverse" ) return self.adjugate() * (1 / determinant) def __repr__( self : Tuple ) -> str: """simple docstring""" return str(self.rows ) def __str__( self : Tuple ) -> str: """simple docstring""" if self.num_rows == 0: return "[]" if self.num_rows == 1: return "[[" + ". ".join(str(self.rows[0] ) ) + "]]" return ( "[" + "\n ".join( [ "[" + ". ".join([str(lowerCAmelCase__ ) for value in row] ) + ".]" for row in self.rows ] ) + "]" ) def _lowerCAmelCase ( self : List[Any] , lowerCAmelCase__ : list[int] , lowerCAmelCase__ : int | None = None ) -> None: """simple docstring""" _UpperCAmelCase : Dict = TypeError("Row must be a list containing all ints and/or floats" ) if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): raise type_error for value in row: if not isinstance(lowerCAmelCase__ , (int, float) ): raise type_error if len(lowerCAmelCase__ ) != self.num_columns: raise ValueError( "Row must be equal in length to the other rows in the matrix" ) if position is None: self.rows.append(lowerCAmelCase__ ) else: _UpperCAmelCase : List[Any] = self.rows[0:position] + [row] + self.rows[position:] def _lowerCAmelCase ( self : List[str] , lowerCAmelCase__ : list[int] , lowerCAmelCase__ : int | None = None ) -> None: """simple docstring""" _UpperCAmelCase : Union[str, Any] = TypeError( "Column must be a list containing all ints and/or floats" ) if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): raise type_error for value in column: if not isinstance(lowerCAmelCase__ , (int, float) ): raise type_error if len(lowerCAmelCase__ ) != self.num_rows: raise ValueError( "Column must be equal in length to the other columns in the matrix" ) if position is None: _UpperCAmelCase : int = [self.rows[i] + [column[i]] for i in range(self.num_rows )] else: _UpperCAmelCase : Any = [ self.rows[i][0:position] + [column[i]] + self.rows[i][position:] for i in range(self.num_rows ) ] def __eq__( self : List[Any] , lowerCAmelCase__ : object ) -> bool: """simple docstring""" if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): return NotImplemented return self.rows == other.rows def __ne__( self : List[str] , lowerCAmelCase__ : object ) -> bool: """simple docstring""" return not self == other def __neg__( self : Dict ) -> Matrix: """simple docstring""" return self * -1 def __add__( self : List[Any] , lowerCAmelCase__ : Matrix ) -> Matrix: """simple docstring""" if self.order != other.order: raise ValueError("Addition requires matrices of the same order" ) return Matrix( [ [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )] for i in range(self.num_rows ) ] ) def __sub__( self : Tuple , lowerCAmelCase__ : Matrix ) -> Matrix: """simple docstring""" if self.order != other.order: raise ValueError("Subtraction requires matrices of the same order" ) return Matrix( [ [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )] for i in range(self.num_rows ) ] ) def __mul__( self : Optional[Any] , lowerCAmelCase__ : Matrix | int | float ) -> Matrix: """simple docstring""" if isinstance(lowerCAmelCase__ , (int, float) ): return Matrix( [[int(element * other ) for element in row] for row in self.rows] ) elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): if self.num_columns != other.num_rows: raise ValueError( "The number of columns in the first matrix must " "be equal to the number of rows in the second" 
) return Matrix( [ [Matrix.dot_product(lowerCAmelCase__ , lowerCAmelCase__ ) for column in other.columns()] for row in self.rows ] ) else: raise TypeError( "A Matrix can only be multiplied by an int, float, or another matrix" ) def __pow__( self : List[Any] , lowerCAmelCase__ : int ) -> Matrix: """simple docstring""" if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): raise TypeError("A Matrix can only be raised to the power of an int" ) if not self.is_square: raise ValueError("Only square matrices can be raised to a power" ) if other == 0: return self.identity() if other < 0: if self.is_invertable(): return self.inverse() ** (-other) raise ValueError( "Only invertable matrices can be raised to a negative power" ) _UpperCAmelCase : List[Any] = self for _ in range(other - 1 ): result *= self return result @classmethod def _lowerCAmelCase ( cls : Dict , lowerCAmelCase__ : list[int] , lowerCAmelCase__ : list[int] ) -> int: """simple docstring""" return sum(row[i] * column[i] for i in range(len(lowerCAmelCase__ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
17
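Every method of the Matrix class above is stored under a placeholder name, so here is an illustrative usage sketch that assumes the class and methods are restored to the names its own call sites use (Matrix, determinant, inverse, and the operators). A determinant-1 matrix is chosen deliberately, since __mul__ truncates scalar products to int:

a = Matrix([[2, 3], [1, 2]])  # determinant = 2*2 - 3*1 = 1
print(a.determinant())        # 1
inv = a.inverse()             # adjugate * (1/determinant) = [[2, -3], [-1, 2]] since det is 1
print(a * inv)                # the 2x2 identity matrix
print(a ** 2)                 # repeated multiplication: entries [[7, 12], [4, 7]]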
'''simple docstring''' from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging __a = logging.get_logger(__name__) __a = { 'huggingface/time-series-transformer-tourism-monthly': ( 'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json' ), # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer } class A__ ( UpperCamelCase ): """simple docstring""" UpperCamelCase_ : Tuple = '''time_series_transformer''' UpperCamelCase_ : Optional[Any] = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', '''num_hidden_layers''': '''encoder_layers''', } def __init__( self : Optional[int] , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : str = "student_t" , lowerCAmelCase__ : str = "nll" , lowerCAmelCase__ : int = 1 , lowerCAmelCase__ : List[int] = [1, 2, 3, 4, 5, 6, 7] , lowerCAmelCase__ : Optional[Union[str, bool]] = "mean" , lowerCAmelCase__ : int = 0 , lowerCAmelCase__ : int = 0 , lowerCAmelCase__ : int = 0 , lowerCAmelCase__ : int = 0 , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : int = 3_2 , lowerCAmelCase__ : int = 3_2 , lowerCAmelCase__ : int = 2 , lowerCAmelCase__ : int = 2 , lowerCAmelCase__ : int = 2 , lowerCAmelCase__ : int = 2 , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : str = "gelu" , lowerCAmelCase__ : int = 6_4 , lowerCAmelCase__ : float = 0.1 , lowerCAmelCase__ : float = 0.1 , lowerCAmelCase__ : float = 0.1 , lowerCAmelCase__ : float = 0.1 , lowerCAmelCase__ : float = 0.1 , lowerCAmelCase__ : int = 1_0_0 , lowerCAmelCase__ : float = 0.02 , lowerCAmelCase__ : Dict=True , **lowerCAmelCase__ : Tuple , ) -> Tuple: """simple docstring""" _UpperCAmelCase : Optional[int] = prediction_length _UpperCAmelCase : Optional[Any] = context_length or prediction_length _UpperCAmelCase : Optional[Any] = distribution_output _UpperCAmelCase : Union[str, Any] = loss _UpperCAmelCase : Dict = input_size _UpperCAmelCase : int = num_time_features _UpperCAmelCase : Any = lags_sequence _UpperCAmelCase : Dict = scaling _UpperCAmelCase : Tuple = num_dynamic_real_features _UpperCAmelCase : Dict = num_static_real_features _UpperCAmelCase : Union[str, Any] = num_static_categorical_features if cardinality and num_static_categorical_features > 0: if len(lowerCAmelCase__ ) != num_static_categorical_features: raise ValueError( "The cardinality should be a list of the same length as `num_static_categorical_features`" ) _UpperCAmelCase : Optional[int] = cardinality else: _UpperCAmelCase : Optional[Any] = [0] if embedding_dimension and num_static_categorical_features > 0: if len(lowerCAmelCase__ ) != num_static_categorical_features: raise ValueError( "The embedding dimension should be a list of the same length as `num_static_categorical_features`" ) _UpperCAmelCase : List[Any] = embedding_dimension else: _UpperCAmelCase : Optional[Any] = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality] _UpperCAmelCase : str = num_parallel_samples # Transformer architecture configuration _UpperCAmelCase : Union[str, Any] = input_size * len(lowerCAmelCase__ ) + self._number_of_features _UpperCAmelCase : str = d_model _UpperCAmelCase : Optional[Any] = encoder_attention_heads _UpperCAmelCase : Dict = decoder_attention_heads _UpperCAmelCase : List[Any] = encoder_ffn_dim _UpperCAmelCase : str = decoder_ffn_dim 
_UpperCAmelCase : Dict = encoder_layers _UpperCAmelCase : str = decoder_layers _UpperCAmelCase : Any = dropout _UpperCAmelCase : str = attention_dropout _UpperCAmelCase : List[Any] = activation_dropout _UpperCAmelCase : Dict = encoder_layerdrop _UpperCAmelCase : Any = decoder_layerdrop _UpperCAmelCase : Optional[Any] = activation_function _UpperCAmelCase : Tuple = init_std _UpperCAmelCase : List[str] = use_cache super().__init__(is_encoder_decoder=lowerCAmelCase__ , **lowerCAmelCase__ ) @property def _lowerCAmelCase ( self : str ) -> int: """simple docstring""" return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
17
1
'''simple docstring''' from importlib import import_module from .logging import get_logger __a = get_logger(__name__) class A__ : """simple docstring""" def __init__( self : List[str] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[Any]=None ) -> Optional[int]: """simple docstring""" _UpperCAmelCase : Any = attrs or [] if module is not None: for key in module.__dict__: if key in attrs or not key.startswith("__" ): setattr(self , lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) _UpperCAmelCase : int = module._original_module if isinstance(lowerCAmelCase__ , _PatchedModuleObj ) else module class A__ : """simple docstring""" UpperCamelCase_ : Union[str, Any] = [] def __init__( self : int , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : str , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[int]=None ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase : List[Any] = obj _UpperCAmelCase : int = target _UpperCAmelCase : Optional[int] = new _UpperCAmelCase : Any = target.split("." )[0] _UpperCAmelCase : Optional[int] = {} _UpperCAmelCase : Dict = attrs or [] def __enter__( self : List[str] ) -> int: """simple docstring""" *_UpperCAmelCase , _UpperCAmelCase : List[str] = self.target.split("." ) # Patch modules: # it's used to patch attributes of submodules like "os.path.join"; # in this case we need to patch "os" and "os.path" for i in range(len(lowerCAmelCase__ ) ): try: _UpperCAmelCase : int = import_module(".".join(submodules[: i + 1] ) ) except ModuleNotFoundError: continue # We iterate over all the globals in self.obj in case we find "os" or "os.path" for attr in self.obj.__dir__(): _UpperCAmelCase : List[Any] = getattr(self.obj , lowerCAmelCase__ ) # We don't check for the name of the global, but rather if its value *is* "os" or "os.path". # This allows to patch renamed modules like "from os import path as ospath". if obj_attr is submodule or ( (isinstance(lowerCAmelCase__ , _PatchedModuleObj ) and obj_attr._original_module is submodule) ): _UpperCAmelCase : Tuple = obj_attr # patch at top level setattr(self.obj , lowerCAmelCase__ , _PatchedModuleObj(lowerCAmelCase__ , attrs=self.attrs ) ) _UpperCAmelCase : List[Any] = getattr(self.obj , lowerCAmelCase__ ) # construct lower levels patches for key in submodules[i + 1 :]: setattr(lowerCAmelCase__ , lowerCAmelCase__ , _PatchedModuleObj(getattr(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , attrs=self.attrs ) ) _UpperCAmelCase : Any = getattr(lowerCAmelCase__ , lowerCAmelCase__ ) # finally set the target attribute setattr(lowerCAmelCase__ , lowerCAmelCase__ , self.new ) # Patch attribute itself: # it's used for builtins like "open", # and also to patch "os.path.join" we may also need to patch "join" # itself if it was imported as "from os.path import join". if submodules: # if it's an attribute of a submodule like "os.path.join" try: _UpperCAmelCase : Dict = getattr(import_module(".".join(lowerCAmelCase__ ) ) , lowerCAmelCase__ ) except (AttributeError, ModuleNotFoundError): return # We iterate over all the globals in self.obj in case we find "os.path.join" for attr in self.obj.__dir__(): # We don't check for the name of the global, but rather if its value *is* "os.path.join". # This allows to patch renamed attributes like "from os.path import join as pjoin". 
if getattr(self.obj , lowerCAmelCase__ ) is attr_value: _UpperCAmelCase : Optional[Any] = getattr(self.obj , lowerCAmelCase__ ) setattr(self.obj , lowerCAmelCase__ , self.new ) elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open" _UpperCAmelCase : Dict = globals()["__builtins__"][target_attr] setattr(self.obj , lowerCAmelCase__ , self.new ) else: raise RuntimeError(F"""Tried to patch attribute {target_attr} instead of a submodule.""" ) def __exit__( self : Optional[int] , *lowerCAmelCase__ : List[str] ) -> Union[str, Any]: """simple docstring""" for attr in list(self.original ): setattr(self.obj , lowerCAmelCase__ , self.original.pop(lowerCAmelCase__ ) ) def _lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" self.__enter__() self._active_patches.append(self ) def _lowerCAmelCase ( self : Optional[int] ) -> Tuple: """simple docstring""" try: self._active_patches.remove(self ) except ValueError: # If the patch hasn't been started this will fail return None return self.__exit__()
17
'''simple docstring''' import base64 def base32_encode ( string: str ): return base64.b32encode(string.encode("utf-8" ) ) def base32_decode ( encoded_bytes: bytes ): return base64.b32decode(encoded_bytes ).decode("utf-8" ) if __name__ == "__main__": test = 'Hello World!' encoded = base32_encode(test) print(encoded) decoded = base32_decode(encoded) print(decoded)
17
1
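The two wrappers above are exact inverses, so any UTF-8 string round-trips through them. Note that reading the corrupted digits in the names as "32" is an assumption; b16 or b85 would have been garbled identically. A quick check under that reading:

import base64

message = "Hello World!"
encoded = base64.b32encode(message.encode("utf-8"))  # bytes over the A-Z, 2-7 alphabet plus padding
decoded = base64.b32decode(encoded).decode("utf-8")
assert decoded == message  # encode and decode are exact inverses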
'''simple docstring''' import functools import gc import inspect import torch from .imports import is_npu_available, is_xpu_available def release_memory ( *objects ): if not isinstance(objects, list ): objects = list(objects ) for i in range(len(objects ) ): objects[i] = None gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() return objects def should_reduce_batch_size ( exception: Exception ): _statements = [ "CUDA out of memory.", # CUDA OOM "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.", # CUDNN SNAFU "DefaultCPUAllocator: can't allocate memory", # CPU OOM ] if isinstance(exception, RuntimeError ) and len(exception.args ) == 1: return any(err in exception.args[0] for err in _statements ) return False def find_executable_batch_size ( function: callable = None, starting_batch_size: int = 128 ): if function is None: return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size ) batch_size = starting_batch_size def decorator(*args, **kwargs ): nonlocal batch_size gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() params = list(inspect.signature(function ).parameters.keys() ) # Guard against user error if len(params ) < (len(args ) + 1): arg_str = ", ".join([f"""{arg}={value}""" for arg, value in zip(params[1:], args[1:] )] ) raise TypeError( f"""Batch size was passed into `{function.__name__}` as the first argument when called.""" f"""Remove this as the decorator already does so: `{function.__name__}({arg_str})`""" ) while True: if batch_size == 0: raise RuntimeError("No executable batch size found, reached zero." ) try: return function(batch_size, *args, **kwargs ) except Exception as e: if should_reduce_batch_size(e ): gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() batch_size //= 2 else: raise return decorator
17
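A usage sketch for the find_executable_batch_size decorator above: the wrapped function receives the current batch size as its first argument and is retried with the batch size halved whenever an OOM-style error escapes it. A fake RuntimeError stands in for a real CUDA out-of-memory here:

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    # Pretend anything above 32 exhausts device memory.
    if batch_size > 32:
        raise RuntimeError("CUDA out of memory.")
    return batch_size

print(train())  # tries 128 and 64 (both "OOM"), then succeeds with 32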
'''simple docstring''' from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class A__ : """simple docstring""" UpperCamelCase_ : Any = XGLMConfig UpperCamelCase_ : Union[str, Any] = {} UpperCamelCase_ : Dict = '''gelu''' def __init__( self : Optional[int] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[Any]=1_4 , lowerCAmelCase__ : Any=7 , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : List[str]=True , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : List[str]=9_9 , lowerCAmelCase__ : Any=3_2 , lowerCAmelCase__ : Optional[int]=2 , lowerCAmelCase__ : List[Any]=4 , lowerCAmelCase__ : Any=3_7 , lowerCAmelCase__ : List[Any]="gelu" , lowerCAmelCase__ : List[Any]=0.1 , lowerCAmelCase__ : Dict=0.1 , lowerCAmelCase__ : Optional[int]=5_1_2 , lowerCAmelCase__ : Optional[Any]=0.02 , ) -> int: """simple docstring""" _UpperCAmelCase : Optional[Any] = parent _UpperCAmelCase : str = batch_size _UpperCAmelCase : str = seq_length _UpperCAmelCase : int = is_training _UpperCAmelCase : List[Any] = use_input_mask _UpperCAmelCase : Optional[int] = use_labels _UpperCAmelCase : str = vocab_size _UpperCAmelCase : int = d_model _UpperCAmelCase : Tuple = num_hidden_layers _UpperCAmelCase : Tuple = num_attention_heads _UpperCAmelCase : Tuple = ffn_dim _UpperCAmelCase : Any = activation_function _UpperCAmelCase : Union[str, Any] = activation_dropout _UpperCAmelCase : Union[str, Any] = attention_dropout _UpperCAmelCase : Any = max_position_embeddings _UpperCAmelCase : int = initializer_range _UpperCAmelCase : Any = None _UpperCAmelCase : int = 0 _UpperCAmelCase : Union[str, Any] = 2 _UpperCAmelCase : Tuple = 1 def _lowerCAmelCase ( self : Optional[Any] ) -> List[Any]: """simple docstring""" return XGLMConfig.from_pretrained("facebook/xglm-564M" ) def _lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" _UpperCAmelCase : int = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 ) _UpperCAmelCase : Any = None if self.use_input_mask: _UpperCAmelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase : Optional[Any] = self.get_config() _UpperCAmelCase : Dict = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, ) def _lowerCAmelCase ( self : int ) -> Any: """simple docstring""" return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=lowerCAmelCase__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=lowerCAmelCase__ , ) def _lowerCAmelCase ( self : 
Tuple ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) : List[Any] = config_and_inputs _UpperCAmelCase : Optional[int] = { "input_ids": input_ids, "head_mask": head_mask, } return config, inputs_dict @require_tf class A__ ( UpperCamelCase , UpperCamelCase , unittest.TestCase ): """simple docstring""" UpperCamelCase_ : str = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () UpperCamelCase_ : Any = (TFXGLMForCausalLM,) if is_tf_available() else () UpperCamelCase_ : Tuple = ( {'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {} ) UpperCamelCase_ : Dict = False UpperCamelCase_ : List[Any] = False UpperCamelCase_ : Tuple = False def _lowerCAmelCase ( self : List[str] ) -> int: """simple docstring""" _UpperCAmelCase : Dict = TFXGLMModelTester(self ) _UpperCAmelCase : Dict = ConfigTester(self , config_class=lowerCAmelCase__ , n_embd=3_7 ) def _lowerCAmelCase ( self : List[str] ) -> Dict: """simple docstring""" self.config_tester.run_common_tests() @slow def _lowerCAmelCase ( self : List[str] ) -> Union[str, Any]: """simple docstring""" for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase : Optional[int] = TFXGLMModel.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor." ) def _lowerCAmelCase ( self : Union[str, Any] ) -> int: """simple docstring""" super().test_resize_token_embeddings() @require_tf class A__ ( unittest.TestCase ): """simple docstring""" @slow def _lowerCAmelCase ( self : Optional[int] , lowerCAmelCase__ : Optional[Any]=True ) -> Tuple: """simple docstring""" _UpperCAmelCase : Optional[int] = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" ) _UpperCAmelCase : Any = tf.convert_to_tensor([[2, 2_6_8, 9_8_6_5]] , dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off _UpperCAmelCase : int = [2, 2_6_8, 9_8_6_5, 6_7, 1_1, 1_9_8_8, 5_7_2_5_2, 9_8_6_5, 5, 9_8_4, 6_7, 1_9_8_8, 2_1_3_8_3_8, 1_6_5_8, 5_3, 7_0_4_4_6, 3_3, 6_6_5_7, 2_7_8, 1_5_8_1] # fmt: on _UpperCAmelCase : Dict = model.generate(lowerCAmelCase__ , do_sample=lowerCAmelCase__ , num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , lowerCAmelCase__ ) @slow def _lowerCAmelCase ( self : List[Any] ) -> str: """simple docstring""" _UpperCAmelCase : List[str] = XGLMTokenizer.from_pretrained("facebook/xglm-564M" ) _UpperCAmelCase : Optional[Any] = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" ) tf.random.set_seed(0 ) _UpperCAmelCase : Any = tokenizer("Today is a nice day and" , return_tensors="tf" ) _UpperCAmelCase : int = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(":/CPU:0" ): _UpperCAmelCase : List[Any] = model.generate(lowerCAmelCase__ , do_sample=lowerCAmelCase__ , seed=[7, 0] ) _UpperCAmelCase : Any = tokenizer.decode(output_ids[0] , skip_special_tokens=lowerCAmelCase__ ) _UpperCAmelCase : List[Any] = ( "Today is a nice day and warm evening here over Southern Alberta!! 
Today when they closed schools due" ) self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ ) @slow def _lowerCAmelCase ( self : Optional[int] ) -> str: """simple docstring""" _UpperCAmelCase : Optional[Any] = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" ) _UpperCAmelCase : List[Any] = XGLMTokenizer.from_pretrained("facebook/xglm-564M" ) _UpperCAmelCase : Optional[int] = "left" # use different length sentences to test batching _UpperCAmelCase : Tuple = [ "This is an extremelly long sentence that only exists to test the ability of the model to cope with " "left-padding, such as in batched generation. The output for the sequence below should be the same " "regardless of whether left padding is applied or not. When", "Hello, my dog is a little", ] _UpperCAmelCase : Dict = tokenizer(lowerCAmelCase__ , return_tensors="tf" , padding=lowerCAmelCase__ ) _UpperCAmelCase : List[Any] = inputs["input_ids"] _UpperCAmelCase : Dict = model.generate(input_ids=lowerCAmelCase__ , attention_mask=inputs["attention_mask"] , max_new_tokens=1_2 ) _UpperCAmelCase : int = tokenizer(sentences[0] , return_tensors="tf" ).input_ids _UpperCAmelCase : Dict = model.generate(input_ids=lowerCAmelCase__ , max_new_tokens=1_2 ) _UpperCAmelCase : Optional[int] = tokenizer(sentences[1] , return_tensors="tf" ).input_ids _UpperCAmelCase : List[Any] = model.generate(input_ids=lowerCAmelCase__ , max_new_tokens=1_2 ) _UpperCAmelCase : List[str] = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ ) _UpperCAmelCase : Tuple = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCAmelCase__ ) _UpperCAmelCase : List[str] = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCAmelCase__ ) _UpperCAmelCase : Union[str, Any] = [ "This is an extremelly long sentence that only exists to test the ability of the model to cope with " "left-padding, such as in batched generation. The output for the sequence below should be the same " "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be " "a single", "Hello, my dog is a little bit of a shy one, but he is very friendly", ] self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , [non_padded_sentence, padded_sentence] )
17
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _import_structure = { 'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_graphormer'] = [ 'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'GraphormerForGraphClassification', 'GraphormerModel', 'GraphormerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_graphormer import ( GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST, GraphormerForGraphClassification, GraphormerModel, GraphormerPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
17
'''simple docstring''' import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( "files", [ ["full:README.md", "dataset_infos.json"], ["empty:README.md", "dataset_infos.json"], ["dataset_infos.json"], ["full:README.md"], ], ) def __UpperCAmelCase ( a_: Tuple, a_: Any ): _UpperCAmelCase : Union[str, Any] = tmp_path_factory.mktemp("dset_infos_dir" ) if "full:README.md" in files: with open(dataset_infos_dir / "README.md", "w" ) as f: f.write("---\ndataset_info:\n dataset_size: 42\n---" ) if "empty:README.md" in files: with open(dataset_infos_dir / "README.md", "w" ) as f: f.write("" ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / "dataset_infos.json", "w" ) as f: f.write("{\"default\": {\"dataset_size\": 42}}" ) _UpperCAmelCase : List[str] = DatasetInfosDict.from_directory(a_ ) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( "dataset_info", [ DatasetInfo(), DatasetInfo( description="foo", features=Features({"a": Value("int32" )} ), builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train"}], download_size=42, ), ], ) def __UpperCAmelCase ( a_: Union[str, Any], a_: DatasetInfo ): _UpperCAmelCase : Tuple = str(a_ ) dataset_info.write_to_directory(a_ ) _UpperCAmelCase : Any = DatasetInfo.from_directory(a_ ) assert dataset_info == reloaded assert os.path.exists(os.path.join(a_, "dataset_info.json" ) ) def __UpperCAmelCase ( ): _UpperCAmelCase : Optional[int] = DatasetInfo( description="foo", citation="bar", homepage="https://foo.bar", license="CC0", features=Features({"a": Value("int32" )} ), post_processed={}, supervised_keys=(), task_templates=[], builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train", "num_examples": 42}], download_checksums={}, download_size=1_337, post_processing_size=442, dataset_size=1_234, size_in_bytes=1_337 + 442 + 1_234, ) _UpperCAmelCase : Tuple = dataset_info._to_yaml_dict() assert sorted(a_ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str) ) _UpperCAmelCase : List[Any] = yaml.safe_dump(a_ ) _UpperCAmelCase : Optional[int] = yaml.safe_load(a_ ) assert dataset_info_yaml_dict == reloaded def __UpperCAmelCase ( ): _UpperCAmelCase : str = DatasetInfo() _UpperCAmelCase : List[str] = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( "dataset_infos_dict", [ DatasetInfosDict(), DatasetInfosDict({"default": DatasetInfo()} ), DatasetInfosDict({"my_config_name": DatasetInfo()} ), DatasetInfosDict( { "default": DatasetInfo( description="foo", features=Features({"a": Value("int32" )} ), builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train"}], download_size=42, ) } ), DatasetInfosDict( { "v1": DatasetInfo(dataset_size=42 ), "v2": DatasetInfo(dataset_size=1_337 ), } ), ], ) def __UpperCAmelCase ( a_: str, a_: DatasetInfosDict ): _UpperCAmelCase : Union[str, Any] = str(a_ ) dataset_infos_dict.write_to_directory(a_ ) _UpperCAmelCase : Union[str, Any] = DatasetInfosDict.from_directory(a_ ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in dataset_infos_dict.items(): _UpperCAmelCase : Optional[int] = 
config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml _UpperCAmelCase : List[str] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(a_, "README.md" ) )
17
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = { 'configuration_upernet': ['UperNetConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_upernet'] = [ 'UperNetForSemanticSegmentation', 'UperNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_upernet import UperNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
17
'''simple docstring''' from math import factorial def solution ( num: int = 100 ): return sum(map(int, str(factorial(num ) ) ) ) if __name__ == "__main__": print(solution(int(input('Enter the Number: ').strip())))
17
1
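A quick worked check of the digit-sum-of-a-factorial computation above: 10! = 3628800, and its digits sum to 27.

from math import factorial

assert factorial(10) == 3628800
assert sum(map(int, str(factorial(10)))) == 27  # 3 + 6 + 2 + 8 + 8 + 0 + 0
# solution() defaults to num=100, i.e. the digit sum of 100!.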
'''simple docstring''' import unicodedata from dataclasses import dataclass from typing import Optional, Union import numpy as np from transformers.data.data_collator import DataCollatorMixin from transformers.file_utils import PaddingStrategy from transformers.tokenization_utils_base import PreTrainedTokenizerBase def __UpperCAmelCase ( a_: Optional[Any], a_: Any, a_: Any, a_: Optional[int] ): if isinstance(a_, a_ ): _UpperCAmelCase : Optional[Any] = np.full((len(a_ ), sequence_length, 2), a_ ) else: _UpperCAmelCase : List[str] = np.full((len(a_ ), sequence_length), a_ ) for i, tensor in enumerate(a_ ): if padding_side == "right": if isinstance(a_, a_ ): _UpperCAmelCase : Any = tensor[:sequence_length] else: _UpperCAmelCase : str = tensor[:sequence_length] else: if isinstance(a_, a_ ): _UpperCAmelCase : List[Any] = tensor[:sequence_length] else: _UpperCAmelCase : Tuple = tensor[:sequence_length] return out_tensor.tolist() def __UpperCAmelCase ( a_: Dict ): _UpperCAmelCase : Optional[Any] = ord(a_ ) if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126): return True _UpperCAmelCase : Union[str, Any] = unicodedata.category(a_ ) if cat.startswith("P" ): return True return False @dataclass class A__ ( UpperCamelCase ): """simple docstring""" UpperCamelCase_ : PreTrainedTokenizerBase UpperCamelCase_ : Union[bool, str, PaddingStrategy] = True UpperCamelCase_ : Optional[int] = None UpperCamelCase_ : Optional[int] = None UpperCamelCase_ : int = -1_00 UpperCamelCase_ : str = "pt" def _lowerCAmelCase ( self : Tuple , lowerCAmelCase__ : str ) -> Optional[Any]: """simple docstring""" import torch _UpperCAmelCase : int = "label" if "label" in features[0].keys() else "labels" _UpperCAmelCase : str = [feature[label_name] for feature in features] if label_name in features[0].keys() else None _UpperCAmelCase : Any = self.tokenizer.pad( lowerCAmelCase__ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" if labels is None else None , ) if labels is None: return batch _UpperCAmelCase : Dict = torch.tensor(batch["entity_ids"] ).shape[1] _UpperCAmelCase : Union[str, Any] = self.tokenizer.padding_side if padding_side == "right": _UpperCAmelCase : Dict = [ list(lowerCAmelCase__ ) + [self.label_pad_token_id] * (sequence_length - len(lowerCAmelCase__ )) for label in labels ] else: _UpperCAmelCase : List[str] = [ [self.label_pad_token_id] * (sequence_length - len(lowerCAmelCase__ )) + list(lowerCAmelCase__ ) for label in labels ] _UpperCAmelCase : int = [feature["ner_tags"] for feature in features] _UpperCAmelCase : Optional[int] = padding_tensor(lowerCAmelCase__ , -1 , lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : int = [feature["original_entity_spans"] for feature in features] _UpperCAmelCase : Optional[Any] = padding_tensor(lowerCAmelCase__ , (-1, -1) , lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : List[str] = {k: torch.tensor(lowerCAmelCase__ , dtype=torch.int64 ) for k, v in batch.items()} return batch
17
'''simple docstring''' from __future__ import annotations from collections.abc import Iterable, Iterator from dataclasses import dataclass __a = (3, 9, -11, 0, 7, 5, 1, -1) __a = (4, 6, 2, 0, 8, 10, 3, -2) @dataclass class A__ : """simple docstring""" UpperCamelCase_ : int UpperCamelCase_ : Node | None class A__ : """simple docstring""" def __init__( self : Dict , lowerCAmelCase__ : Iterable[int] ) -> None: """simple docstring""" _UpperCAmelCase : Node | None = None for i in sorted(lowerCAmelCase__ , reverse=lowerCAmelCase__ ): _UpperCAmelCase : str = Node(lowerCAmelCase__ , self.head ) def __iter__( self : int ) -> Iterator[int]: """simple docstring""" _UpperCAmelCase : List[Any] = self.head while node: yield node.data _UpperCAmelCase : List[str] = node.next_node def __len__( self : Any ) -> int: """simple docstring""" return sum(1 for _ in self ) def __str__( self : Union[str, Any] ) -> str: """simple docstring""" return " -> ".join([str(lowerCAmelCase__ ) for node in self] ) def __UpperCAmelCase ( a_: SortedLinkedList, a_: SortedLinkedList ): return SortedLinkedList(list(a_ ) + list(a_ ) ) if __name__ == "__main__": import doctest doctest.testmod() __a = SortedLinkedList print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
17
1
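A simplified sketch of the padding idea used by the data collator in this row: variable-length label sequences are copied into a fixed-size array prefilled with a pad value (-100 mirrors the default label pad id; the helper name is an assumption).

import numpy as np

def pad_sequences(sequences, pad_value, padding_side="right"):
    # Pad a list of 1-D integer sequences to a common length with pad_value.
    max_len = max(len(seq) for seq in sequences)
    out = np.full((len(sequences), max_len), pad_value)
    for i, seq in enumerate(sequences):
        if padding_side == "right":
            out[i, : len(seq)] = seq
        else:
            out[i, max_len - len(seq) :] = seq
    return out.tolist()

print(pad_sequences([[1, 2, 3], [4]], pad_value=-100))
# -> [[1, 2, 3], [4, -100, -100]]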
'''simple docstring''' from __future__ import annotations from collections.abc import Iterable, Iterator from dataclasses import dataclass __a = (3, 9, -11, 0, 7, 5, 1, -1) __a = (4, 6, 2, 0, 8, 10, 3, -2) @dataclass class A__ : """simple docstring""" UpperCamelCase_ : int UpperCamelCase_ : Node | None class A__ : """simple docstring""" def __init__( self : Dict , lowerCAmelCase__ : Iterable[int] ) -> None: """simple docstring""" _UpperCAmelCase : Node | None = None for i in sorted(lowerCAmelCase__ , reverse=lowerCAmelCase__ ): _UpperCAmelCase : str = Node(lowerCAmelCase__ , self.head ) def __iter__( self : int ) -> Iterator[int]: """simple docstring""" _UpperCAmelCase : List[Any] = self.head while node: yield node.data _UpperCAmelCase : List[str] = node.next_node def __len__( self : Any ) -> int: """simple docstring""" return sum(1 for _ in self ) def __str__( self : Union[str, Any] ) -> str: """simple docstring""" return " -> ".join([str(lowerCAmelCase__ ) for node in self] ) def __UpperCAmelCase ( a_: SortedLinkedList, a_: SortedLinkedList ): return SortedLinkedList(list(a_ ) + list(a_ ) ) if __name__ == "__main__": import doctest doctest.testmod() __a = SortedLinkedList print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
17
'''simple docstring''' def __UpperCAmelCase ( a_: str ): if not all(char in "01" for char in bin_string ): raise ValueError("Non-binary value was passed to the function" ) if not bin_string: raise ValueError("Empty string was passed to the function" ) _UpperCAmelCase : Optional[Any] = "" while len(a_ ) % 3 != 0: _UpperCAmelCase : List[Any] = "0" + bin_string _UpperCAmelCase : Dict = [ bin_string[index : index + 3] for index in range(len(a_ ) ) if index % 3 == 0 ] for bin_group in bin_string_in_3_list: _UpperCAmelCase : Optional[Any] = 0 for index, val in enumerate(a_ ): oct_val += int(2 ** (2 - index) * int(a_ ) ) oct_string += str(a_ ) return oct_string if __name__ == "__main__": from doctest import testmod testmod()
17
1
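A compact, runnable rendering of the binary-to-octal conversion in this row, cross-checked against the built-in oct(); assumes the same non-empty "0"/"1" string input contract.

def bin_to_octal(bin_string: str) -> str:
    # Validate, left-pad to a multiple of 3 bits, then map each triple to 0-7.
    if not bin_string or any(ch not in "01" for ch in bin_string):
        raise ValueError("expected a non-empty binary string")
    padded = bin_string.zfill((len(bin_string) + 2) // 3 * 3)
    return "".join(str(int(padded[i : i + 3], 2)) for i in range(0, len(padded), 3))

assert bin_to_octal("1111") == "17"
assert bin_to_octal("101010101") == oct(int("101010101", 2))[2:]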
'''simple docstring''' import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal __a = datasets.utils.logging.get_logger(__name__) __a = ['names', 'prefix'] __a = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols'] __a = ['encoding_errors', 'on_bad_lines'] __a = ['date_format'] @dataclass class A__ ( datasets.BuilderConfig ): """simple docstring""" UpperCamelCase_ : str = "," UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : Optional[Union[int, List[int], str]] = "infer" UpperCamelCase_ : Optional[List[str]] = None UpperCamelCase_ : Optional[List[str]] = None UpperCamelCase_ : Optional[Union[int, str, List[int], List[str]]] = None UpperCamelCase_ : Optional[Union[List[int], List[str]]] = None UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : bool = True UpperCamelCase_ : Optional[Literal["c", "python", "pyarrow"]] = None UpperCamelCase_ : Dict[Union[int, str], Callable[[Any], Any]] = None UpperCamelCase_ : Optional[list] = None UpperCamelCase_ : Optional[list] = None UpperCamelCase_ : bool = False UpperCamelCase_ : Optional[Union[int, List[int]]] = None UpperCamelCase_ : Optional[int] = None UpperCamelCase_ : Optional[Union[str, List[str]]] = None UpperCamelCase_ : bool = True UpperCamelCase_ : bool = True UpperCamelCase_ : bool = False UpperCamelCase_ : bool = True UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : str = "." UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : str = '"' UpperCamelCase_ : int = 0 UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : bool = True UpperCamelCase_ : bool = True UpperCamelCase_ : int = 0 UpperCamelCase_ : bool = True UpperCamelCase_ : bool = False UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : int = 1_00_00 UpperCamelCase_ : Optional[datasets.Features] = None UpperCamelCase_ : Optional[str] = "strict" UpperCamelCase_ : Literal["error", "warn", "skip"] = "error" UpperCamelCase_ : Optional[str] = None def _lowerCAmelCase ( self : str ) -> Tuple: """simple docstring""" if self.delimiter is not None: _UpperCAmelCase : Any = self.delimiter if self.column_names is not None: _UpperCAmelCase : List[Any] = self.column_names @property def _lowerCAmelCase ( self : Optional[int] ) -> Optional[int]: """simple docstring""" _UpperCAmelCase : Dict = { "sep": self.sep, "header": self.header, "names": self.names, "index_col": self.index_col, "usecols": self.usecols, "prefix": self.prefix, "mangle_dupe_cols": self.mangle_dupe_cols, "engine": self.engine, "converters": self.converters, "true_values": self.true_values, "false_values": self.false_values, "skipinitialspace": self.skipinitialspace, "skiprows": self.skiprows, "nrows": self.nrows, "na_values": self.na_values, "keep_default_na": self.keep_default_na, "na_filter": self.na_filter, "verbose": self.verbose, "skip_blank_lines": self.skip_blank_lines, "thousands": self.thousands, "decimal": self.decimal, "lineterminator": self.lineterminator, "quotechar": self.quotechar, "quoting": self.quoting, "escapechar": self.escapechar, "comment": self.comment, "encoding": self.encoding, "dialect": self.dialect, "error_bad_lines": self.error_bad_lines, "warn_bad_lines": self.warn_bad_lines, 
"skipfooter": self.skipfooter, "doublequote": self.doublequote, "memory_map": self.memory_map, "float_precision": self.float_precision, "chunksize": self.chunksize, "encoding_errors": self.encoding_errors, "on_bad_lines": self.on_bad_lines, "date_format": self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , lowerCAmelCase__ ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class A__ ( datasets.ArrowBasedBuilder ): """simple docstring""" UpperCamelCase_ : int = CsvConfig def _lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" return datasets.DatasetInfo(features=self.config.features ) def _lowerCAmelCase ( self : Tuple , lowerCAmelCase__ : str ) -> List[str]: """simple docstring""" if not self.config.data_files: raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) _UpperCAmelCase : List[str] = dl_manager.download_and_extract(self.config.data_files ) if isinstance(lowerCAmelCase__ , (str, list, tuple) ): _UpperCAmelCase : int = data_files if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCAmelCase : Any = [files] _UpperCAmelCase : List[Any] = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )] _UpperCAmelCase : Optional[Any] = [] for split_name, files in data_files.items(): if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCAmelCase : str = [files] _UpperCAmelCase : Any = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files] splits.append(datasets.SplitGenerator(name=lowerCAmelCase__ , gen_kwargs={"files": files} ) ) return splits def _lowerCAmelCase ( self : List[Any] , lowerCAmelCase__ : pa.Table ) -> pa.Table: """simple docstring""" if self.config.features is not None: _UpperCAmelCase : Tuple = self.config.features.arrow_schema if all(not require_storage_cast(lowerCAmelCase__ ) for feature in self.config.features.values() ): # cheaper cast _UpperCAmelCase : Any = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=lowerCAmelCase__ ) else: # more expensive cast; allows str <-> int/float or str to Audio for example _UpperCAmelCase : int = table_cast(lowerCAmelCase__ , lowerCAmelCase__ ) return pa_table def _lowerCAmelCase ( self : Dict , lowerCAmelCase__ : Dict ) -> Dict: """simple docstring""" _UpperCAmelCase : int = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str _UpperCAmelCase : Optional[Any] = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(lowerCAmelCase__ ) else object for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() ) } if schema is not None else None ) for file_idx, file in 
enumerate(itertools.chain.from_iterable(lowerCAmelCase__ ) ): _UpperCAmelCase : Optional[Any] = pd.read_csv(lowerCAmelCase__ , iterator=lowerCAmelCase__ , dtype=lowerCAmelCase__ , **self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(lowerCAmelCase__ ): _UpperCAmelCase : Optional[int] = pa.Table.from_pandas(lowerCAmelCase__ ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(lowerCAmelCase__ ) except ValueError as e: logger.error(F"""Failed to read file '{file}' with error {type(lowerCAmelCase__ )}: {e}""" ) raise
17
'''simple docstring''' from datetime import datetime import matplotlib.pyplot as plt import torch def __UpperCAmelCase ( a_: str ): for param in module.parameters(): _UpperCAmelCase : Any = False def __UpperCAmelCase ( ): _UpperCAmelCase : Union[str, Any] = "cuda" if torch.cuda.is_available() else "cpu" if torch.backends.mps.is_available() and torch.backends.mps.is_built(): _UpperCAmelCase : int = "mps" if device == "mps": print( "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch" " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues" " with generations." ) return device def __UpperCAmelCase ( a_: Optional[Any] ): _UpperCAmelCase : int = plt.imshow(a_ ) fig.axes.get_xaxis().set_visible(a_ ) fig.axes.get_yaxis().set_visible(a_ ) plt.show() def __UpperCAmelCase ( ): _UpperCAmelCase : Dict = datetime.now() _UpperCAmelCase : List[str] = current_time.strftime("%H:%M:%S" ) return timestamp
17
1
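The CSV builder above prunes pandas read_csv keyword arguments that the installed pandas version does not yet accept. A hypothetical sketch of that version gate; the constant names and the exact argument lists are assumptions mirroring the builder's lists, not its real internals.

import pandas as pd

NEW_IN_PANDAS_2_0 = ["date_format"]
NEW_IN_PANDAS_1_3 = ["encoding_errors", "on_bad_lines"]

def prune_read_csv_kwargs(kwargs: dict) -> dict:
    # Drop read_csv arguments the installed pandas does not understand yet.
    major, minor = (int(part) for part in pd.__version__.split(".")[:2])
    if major < 2:
        for key in NEW_IN_PANDAS_2_0:
            kwargs.pop(key, None)
    if (major, minor) < (1, 3):
        for key in NEW_IN_PANDAS_1_3:
            kwargs.pop(key, None)
    return kwargs

print(prune_read_csv_kwargs({"sep": ",", "date_format": None, "on_bad_lines": "error"}))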
'''simple docstring''' import unittest import numpy as np from transformers import is_flax_available from transformers.testing_utils import require_flax from ..test_modeling_flax_common import ids_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.generation import ( FlaxForcedBOSTokenLogitsProcessor, FlaxForcedEOSTokenLogitsProcessor, FlaxLogitsProcessorList, FlaxMinLengthLogitsProcessor, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper, FlaxTopPLogitsWarper, ) @require_flax class A__ ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : List[str] , lowerCAmelCase__ : int , lowerCAmelCase__ : int ) -> Any: """simple docstring""" _UpperCAmelCase : Tuple = jnp.ones((batch_size, length) ) / length return scores def _lowerCAmelCase ( self : List[str] ) -> List[Any]: """simple docstring""" _UpperCAmelCase : Optional[int] = None _UpperCAmelCase : Any = 2_0 _UpperCAmelCase : str = self._get_uniform_logits(batch_size=2 , length=lowerCAmelCase__ ) # tweak scores to not be uniform anymore _UpperCAmelCase : str = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch _UpperCAmelCase : Optional[Any] = scores.at[1, 1_0].set((1 / length) - 0.4 ) # valley, 1st batch # compute softmax _UpperCAmelCase : Union[str, Any] = jax.nn.softmax(lowerCAmelCase__ , axis=-1 ) _UpperCAmelCase : List[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 ) _UpperCAmelCase : Any = FlaxTemperatureLogitsWarper(temperature=1.3 ) _UpperCAmelCase : Union[str, Any] = jax.nn.softmax(temp_dist_warper_sharper(lowerCAmelCase__ , scores.copy() , cur_len=lowerCAmelCase__ ) , axis=-1 ) _UpperCAmelCase : Dict = jax.nn.softmax(temp_dist_warper_smoother(lowerCAmelCase__ , scores.copy() , cur_len=lowerCAmelCase__ ) , axis=-1 ) # uniform distribution stays uniform self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) ) self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) ) # sharp peaks get higher, valleys get lower self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() ) self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() ) # smooth peaks get lower, valleys get higher self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() ) self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() ) def _lowerCAmelCase ( self : Union[str, Any] ) -> Tuple: """simple docstring""" _UpperCAmelCase : List[str] = None _UpperCAmelCase : Optional[int] = 1_0 _UpperCAmelCase : int = 2 # create ramp distribution _UpperCAmelCase : str = np.broadcast_to(np.arange(lowerCAmelCase__ )[None, :] , (batch_size, vocab_size) ).copy() _UpperCAmelCase : List[Any] = ramp_logits[1:, : vocab_size // 2] + vocab_size _UpperCAmelCase : Optional[int] = FlaxTopKLogitsWarper(3 ) _UpperCAmelCase : Union[str, Any] = top_k_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) # check that correct tokens are filtered self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] ) self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] ) # check special case _UpperCAmelCase : Optional[int] = 5 _UpperCAmelCase : Union[str, Any] = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 ) _UpperCAmelCase : Optional[Any] = np.broadcast_to(np.arange(lowerCAmelCase__ )[None, :] , (batch_size, length) ).copy() _UpperCAmelCase : List[str] = top_k_warp_safety_check(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) # min_tokens overwrites k: 
3 tokens are kept => 2 tokens are nullified self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] ) def _lowerCAmelCase ( self : List[str] ) -> Tuple: """simple docstring""" _UpperCAmelCase : Optional[int] = None _UpperCAmelCase : Any = 1_0 _UpperCAmelCase : Union[str, Any] = 2 # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper) _UpperCAmelCase : List[Any] = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) ) _UpperCAmelCase : Optional[int] = FlaxTopPLogitsWarper(0.8 ) _UpperCAmelCase : Optional[Any] = np.exp(top_p_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) ) # dist should be filtered to keep min num values so that sum is >= top_p # exp (-inf) => 0 _UpperCAmelCase : Optional[Any] = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] ) self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-3 ) ) # check edge cases with negative and extreme logits _UpperCAmelCase : List[str] = np.broadcast_to(np.arange(lowerCAmelCase__ )[None, :] , (batch_size, vocab_size) ).copy() - ( vocab_size // 2 ) # make ramp_logits more extreme _UpperCAmelCase : int = ramp_logits[1] * 100.0 # make sure at least 2 tokens are kept _UpperCAmelCase : Union[str, Any] = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 ) _UpperCAmelCase : List[str] = top_p_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2. self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] ) def _lowerCAmelCase ( self : Any ) -> List[Any]: """simple docstring""" _UpperCAmelCase : int = 2_0 _UpperCAmelCase : List[str] = 4 _UpperCAmelCase : Dict = 0 _UpperCAmelCase : Any = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=lowerCAmelCase__ ) # check that min length is applied at length 5 _UpperCAmelCase : Optional[Any] = ids_tensor((batch_size, 2_0) , vocab_size=2_0 ) _UpperCAmelCase : List[str] = 5 _UpperCAmelCase : Tuple = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : List[Any] = min_dist_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("inf" )] ) # check that min length is not applied anymore at length 15 _UpperCAmelCase : int = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : List[Any] = 1_5 _UpperCAmelCase : int = min_dist_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) self.assertFalse(jnp.isinf(lowerCAmelCase__ ).any() ) def _lowerCAmelCase ( self : List[Any] ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase : Optional[Any] = 2_0 _UpperCAmelCase : int = 4 _UpperCAmelCase : Tuple = 0 _UpperCAmelCase : Optional[int] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase__ ) # check that all scores are -inf except the bos_token_id score _UpperCAmelCase : Any = ids_tensor((batch_size, 1) , vocab_size=2_0 ) _UpperCAmelCase : Union[str, Any] = 1 _UpperCAmelCase : Any = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : Optional[Any] = logits_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() ) self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero # check that bos_token_id is not 
forced if current length is greater than 1 _UpperCAmelCase : Any = 3 _UpperCAmelCase : Optional[int] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : str = logits_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) self.assertFalse(jnp.isinf(lowerCAmelCase__ ).any() ) def _lowerCAmelCase ( self : int ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase : str = 2_0 _UpperCAmelCase : List[Any] = 4 _UpperCAmelCase : int = 0 _UpperCAmelCase : Optional[int] = 5 _UpperCAmelCase : int = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ ) # check that all scores are -inf except the eos_token_id when max_length is reached _UpperCAmelCase : Optional[int] = ids_tensor((batch_size, 4) , vocab_size=2_0 ) _UpperCAmelCase : Optional[int] = 4 _UpperCAmelCase : str = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : Optional[Any] = logits_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() ) self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero # check that eos_token_id is not forced if max_length is not reached _UpperCAmelCase : Dict = 3 _UpperCAmelCase : Dict = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : Any = logits_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) self.assertFalse(jnp.isinf(lowerCAmelCase__ ).any() ) def _lowerCAmelCase ( self : Dict ) -> Any: """simple docstring""" _UpperCAmelCase : int = 4 _UpperCAmelCase : Optional[int] = 1_0 _UpperCAmelCase : List[Any] = 1_5 _UpperCAmelCase : Optional[Any] = 2 _UpperCAmelCase : Union[str, Any] = 1 _UpperCAmelCase : Any = 1_5 # dummy input_ids and scores _UpperCAmelCase : Optional[int] = ids_tensor((batch_size, sequence_length) , lowerCAmelCase__ ) _UpperCAmelCase : Optional[int] = input_ids.copy() _UpperCAmelCase : str = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : Union[str, Any] = scores.copy() # instantiate all dist processors _UpperCAmelCase : str = FlaxTemperatureLogitsWarper(temperature=0.5 ) _UpperCAmelCase : str = FlaxTopKLogitsWarper(3 ) _UpperCAmelCase : Optional[int] = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors _UpperCAmelCase : Optional[Any] = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=lowerCAmelCase__ ) _UpperCAmelCase : Dict = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase__ ) _UpperCAmelCase : List[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ ) _UpperCAmelCase : str = 1_0 # no processor list _UpperCAmelCase : Optional[int] = temp_dist_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) _UpperCAmelCase : List[str] = top_k_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) _UpperCAmelCase : str = top_p_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) _UpperCAmelCase : Tuple = min_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) _UpperCAmelCase : Optional[Any] = bos_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) _UpperCAmelCase : Union[str, Any] = eos_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) # with processor list _UpperCAmelCase : List[str] = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, 
min_dist_proc, bos_dist_proc, eos_dist_proc] ) _UpperCAmelCase : Tuple = processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) # scores should be equal self.assertTrue(jnp.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() ) def _lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" _UpperCAmelCase : Dict = 4 _UpperCAmelCase : int = 1_0 _UpperCAmelCase : Any = 1_5 _UpperCAmelCase : List[Any] = 2 _UpperCAmelCase : Any = 1 _UpperCAmelCase : Optional[int] = 1_5 # dummy input_ids and scores _UpperCAmelCase : List[Any] = ids_tensor((batch_size, sequence_length) , lowerCAmelCase__ ) _UpperCAmelCase : List[str] = input_ids.copy() _UpperCAmelCase : Tuple = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : List[Any] = scores.copy() # instantiate all dist processors _UpperCAmelCase : Any = FlaxTemperatureLogitsWarper(temperature=0.5 ) _UpperCAmelCase : Dict = FlaxTopKLogitsWarper(3 ) _UpperCAmelCase : List[str] = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors _UpperCAmelCase : str = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=lowerCAmelCase__ ) _UpperCAmelCase : int = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase__ ) _UpperCAmelCase : Union[str, Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ ) _UpperCAmelCase : Dict = 1_0 # no processor list def run_no_processor_list(lowerCAmelCase__ : Dict , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[int] ): _UpperCAmelCase : List[str] = temp_dist_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) _UpperCAmelCase : Dict = top_k_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) _UpperCAmelCase : List[Any] = top_p_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) _UpperCAmelCase : Union[str, Any] = min_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) _UpperCAmelCase : List[str] = bos_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) _UpperCAmelCase : int = eos_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) return scores # with processor list def run_processor_list(lowerCAmelCase__ : str , lowerCAmelCase__ : str , lowerCAmelCase__ : List[str] ): _UpperCAmelCase : Union[str, Any] = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) _UpperCAmelCase : Dict = processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) return scores _UpperCAmelCase : Any = jax.jit(lowerCAmelCase__ ) _UpperCAmelCase : List[str] = jax.jit(lowerCAmelCase__ ) _UpperCAmelCase : Tuple = jitted_run_no_processor_list(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : Dict = jitted_run_processor_list(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # scores should be equal self.assertTrue(jnp.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
17
'''simple docstring''' import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class A__ ( UpperCamelCase ): """simple docstring""" UpperCamelCase_ : Optional[int] = (EulerDiscreteScheduler,) UpperCamelCase_ : Tuple = 10 def _lowerCAmelCase ( self : Dict , **lowerCAmelCase__ : Tuple ) -> Any: """simple docstring""" _UpperCAmelCase : str = { "num_train_timesteps": 1_1_0_0, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**lowerCAmelCase__ ) return config def _lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=lowerCAmelCase__ ) def _lowerCAmelCase ( self : Any ) -> List[str]: """simple docstring""" for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=lowerCAmelCase__ , beta_end=lowerCAmelCase__ ) def _lowerCAmelCase ( self : List[str] ) -> List[str]: """simple docstring""" for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=lowerCAmelCase__ ) def _lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowerCAmelCase__ ) def _lowerCAmelCase ( self : List[Any] ) -> List[Any]: """simple docstring""" _UpperCAmelCase : List[str] = self.scheduler_classes[0] _UpperCAmelCase : int = self.get_scheduler_config() _UpperCAmelCase : Optional[int] = scheduler_class(**lowerCAmelCase__ ) scheduler.set_timesteps(self.num_inference_steps ) _UpperCAmelCase : int = torch.manual_seed(0 ) _UpperCAmelCase : Any = self.dummy_model() _UpperCAmelCase : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma _UpperCAmelCase : List[Any] = sample.to(lowerCAmelCase__ ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase : List[str] = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : int = model(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : int = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ ) _UpperCAmelCase : Optional[int] = output.prev_sample _UpperCAmelCase : Optional[Any] = torch.sum(torch.abs(lowerCAmelCase__ ) ) _UpperCAmelCase : Tuple = torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_sum.item() - 10.0807 ) < 1e-2 assert abs(result_mean.item() - 0.0131 ) < 1e-3 def _lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" _UpperCAmelCase : Any = self.scheduler_classes[0] _UpperCAmelCase : List[Any] = self.get_scheduler_config(prediction_type="v_prediction" ) _UpperCAmelCase : Any = scheduler_class(**lowerCAmelCase__ ) scheduler.set_timesteps(self.num_inference_steps ) _UpperCAmelCase : str = torch.manual_seed(0 ) _UpperCAmelCase : Optional[Any] = self.dummy_model() _UpperCAmelCase : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma _UpperCAmelCase : Tuple = sample.to(lowerCAmelCase__ ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase : Union[str, Any] = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : int = model(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : Union[str, Any] = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ ) _UpperCAmelCase : Union[str, Any] = output.prev_sample 
_UpperCAmelCase : Tuple = torch.sum(torch.abs(lowerCAmelCase__ ) ) _UpperCAmelCase : Any = torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_sum.item() - 0.0002 ) < 1e-2 assert abs(result_mean.item() - 2.26_76e-06 ) < 1e-3 def _lowerCAmelCase ( self : Tuple ) -> str: """simple docstring""" _UpperCAmelCase : Optional[int] = self.scheduler_classes[0] _UpperCAmelCase : List[Any] = self.get_scheduler_config() _UpperCAmelCase : int = scheduler_class(**lowerCAmelCase__ ) scheduler.set_timesteps(self.num_inference_steps , device=lowerCAmelCase__ ) _UpperCAmelCase : Optional[int] = torch.manual_seed(0 ) _UpperCAmelCase : str = self.dummy_model() _UpperCAmelCase : Any = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() _UpperCAmelCase : str = sample.to(lowerCAmelCase__ ) for t in scheduler.timesteps: _UpperCAmelCase : List[str] = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : Any = model(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : Tuple = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ ) _UpperCAmelCase : int = output.prev_sample _UpperCAmelCase : List[Any] = torch.sum(torch.abs(lowerCAmelCase__ ) ) _UpperCAmelCase : str = torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_sum.item() - 10.0807 ) < 1e-2 assert abs(result_mean.item() - 0.0131 ) < 1e-3 def _lowerCAmelCase ( self : List[str] ) -> int: """simple docstring""" _UpperCAmelCase : List[Any] = self.scheduler_classes[0] _UpperCAmelCase : int = self.get_scheduler_config() _UpperCAmelCase : Union[str, Any] = scheduler_class(**lowerCAmelCase__ , use_karras_sigmas=lowerCAmelCase__ ) scheduler.set_timesteps(self.num_inference_steps , device=lowerCAmelCase__ ) _UpperCAmelCase : Optional[int] = torch.manual_seed(0 ) _UpperCAmelCase : List[str] = self.dummy_model() _UpperCAmelCase : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() _UpperCAmelCase : Optional[int] = sample.to(lowerCAmelCase__ ) for t in scheduler.timesteps: _UpperCAmelCase : List[Any] = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : str = model(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : Optional[Any] = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ ) _UpperCAmelCase : List[Any] = output.prev_sample _UpperCAmelCase : List[Any] = torch.sum(torch.abs(lowerCAmelCase__ ) ) _UpperCAmelCase : Optional[Any] = torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_sum.item() - 124.52_2994_9951_1719 ) < 1e-2 assert abs(result_mean.item() - 0.1_6213_9326_3339_9963 ) < 1e-3
17
1
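The logits-warper tests in this row exercise top-k filtering: only the k largest scores per row survive, the rest are pushed to -inf. A framework-agnostic numpy sketch of that operation (an illustration of the technique, not the Flax implementation itself):

import numpy as np

def top_k_filter(logits: np.ndarray, k: int, filter_value: float = -np.inf) -> np.ndarray:
    # Keep the k largest logits in each row; everything else becomes filter_value.
    out = np.full(logits.shape, filter_value)
    top_idx = np.argpartition(logits, -k, axis=-1)[:, -k:]
    rows = np.arange(logits.shape[0])[:, None]
    out[rows, top_idx] = logits[rows, top_idx]
    return out

scores = np.array([[0.1, 0.4, 0.2, 0.3]])
print(top_k_filter(scores, k=2))  # [[-inf  0.4 -inf  0.3]]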
'''simple docstring''' import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class A__ ( UpperCamelCase , unittest.TestCase ): """simple docstring""" UpperCamelCase_ : Optional[Any] = '''ssube/stable-diffusion-x4-upscaler-onnx''' def _lowerCAmelCase ( self : int , lowerCAmelCase__ : List[Any]=0 ) -> Any: """simple docstring""" _UpperCAmelCase : Optional[int] = floats_tensor((1, 3, 1_2_8, 1_2_8) , rng=random.Random(lowerCAmelCase__ ) ) _UpperCAmelCase : int = torch.manual_seed(lowerCAmelCase__ ) _UpperCAmelCase : Union[str, Any] = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def _lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase : int = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) _UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs() _UpperCAmelCase : str = pipe(**lowerCAmelCase__ ).images _UpperCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 5_1_2, 5_1_2, 3) _UpperCAmelCase : Union[str, Any] = np.array( [0.697_4782, 0.6890_2093, 0.7013_5885, 0.758_3618, 0.780_4545, 0.785_4912, 0.7866_7426, 0.7874_3863, 0.7807_0223] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def _lowerCAmelCase ( self : List[Any] ) -> int: """simple docstring""" _UpperCAmelCase : Optional[int] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) _UpperCAmelCase : List[Any] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) _UpperCAmelCase : int = self.get_dummy_inputs() _UpperCAmelCase : List[Any] = pipe(**lowerCAmelCase__ ).images _UpperCAmelCase : str = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) _UpperCAmelCase : Optional[Any] = np.array( [0.689_8892, 0.5924_0556, 0.5249_9527, 0.5886_6215, 0.5225_8235, 0.5257_2715, 0.6241_4473, 0.617_4387, 0.621_4964] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _lowerCAmelCase ( self : Union[str, Any] ) -> Dict: """simple docstring""" _UpperCAmelCase : List[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) _UpperCAmelCase : Tuple = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) _UpperCAmelCase : Optional[int] = self.get_dummy_inputs() _UpperCAmelCase : Any = pipe(**lowerCAmelCase__ ).images _UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) _UpperCAmelCase : int = np.array( [0.765_9278, 0.7643_7664, 0.7557_9107, 0.769_1116, 0.7766_6986, 0.772_7672, 0.775_8664, 0.781_2226, 0.7694_2515] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 
1e-1 def _lowerCAmelCase ( self : Tuple ) -> List[str]: """simple docstring""" _UpperCAmelCase : List[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) _UpperCAmelCase : Dict = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) _UpperCAmelCase : str = self.get_dummy_inputs() _UpperCAmelCase : Optional[Any] = pipe(**lowerCAmelCase__ ).images _UpperCAmelCase : int = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) _UpperCAmelCase : Dict = np.array( [0.697_4782, 0.6890_2093, 0.7013_5885, 0.758_3618, 0.780_4545, 0.785_4912, 0.7866_7426, 0.7874_3863, 0.7807_0223] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _lowerCAmelCase ( self : str ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) _UpperCAmelCase : Dict = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) _UpperCAmelCase : Tuple = self.get_dummy_inputs() _UpperCAmelCase : Tuple = pipe(**lowerCAmelCase__ ).images _UpperCAmelCase : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) _UpperCAmelCase : Union[str, Any] = np.array( [0.7742_4496, 0.77_3601, 0.764_5288, 0.776_9598, 0.777_2739, 0.773_8688, 0.7818_7233, 0.7787_9584, 0.76_7043] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class A__ ( unittest.TestCase ): """simple docstring""" @property def _lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _lowerCAmelCase ( self : Optional[int] ) -> Dict: """simple docstring""" _UpperCAmelCase : int = ort.SessionOptions() _UpperCAmelCase : List[str] = False return options def _lowerCAmelCase ( self : Tuple ) -> Optional[int]: """simple docstring""" _UpperCAmelCase : Optional[int] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) _UpperCAmelCase : Dict = init_image.resize((1_2_8, 1_2_8) ) # using the PNDM scheduler by default _UpperCAmelCase : Any = OnnxStableDiffusionUpscalePipeline.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx" , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) _UpperCAmelCase : Optional[int] = "A fantasy landscape, trending on artstation" _UpperCAmelCase : List[Any] = torch.manual_seed(0 ) _UpperCAmelCase : int = pipe( prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , guidance_scale=7.5 , num_inference_steps=1_0 , generator=lowerCAmelCase__ , output_type="np" , ) _UpperCAmelCase : Dict = output.images _UpperCAmelCase : Dict = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert images.shape == (1, 5_1_2, 5_1_2, 3) _UpperCAmelCase : Any = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def _lowerCAmelCase ( self : Union[str, Any] ) -> List[str]: """simple docstring""" _UpperCAmelCase : Tuple = load_image( 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) _UpperCAmelCase : Dict = init_image.resize((1_2_8, 1_2_8) ) _UpperCAmelCase : Dict = LMSDiscreteScheduler.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx" , subfolder="scheduler" ) _UpperCAmelCase : Optional[int] = OnnxStableDiffusionUpscalePipeline.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx" , scheduler=lowerCAmelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) _UpperCAmelCase : str = "A fantasy landscape, trending on artstation" _UpperCAmelCase : Optional[int] = torch.manual_seed(0 ) _UpperCAmelCase : Dict = pipe( prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , guidance_scale=7.5 , num_inference_steps=2_0 , generator=lowerCAmelCase__ , output_type="np" , ) _UpperCAmelCase : List[str] = output.images _UpperCAmelCase : List[str] = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert images.shape == (1, 5_1_2, 5_1_2, 3) _UpperCAmelCase : Union[str, Any] = np.array( [0.5017_3753, 0.5022_3356, 0.50_2039, 0.5023_3036, 0.502_3725, 0.502_2601, 0.501_8758, 0.5023_4085, 0.5024_1566] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
17
'''simple docstring''' def __UpperCAmelCase ( a_: int, a_: int ): if a < 0 or b < 0: raise ValueError("the value of both inputs must be positive" ) _UpperCAmelCase : List[str] = str(bin(a_ ) )[2:] # remove the leading "0b" _UpperCAmelCase : Any = str(bin(a_ ) )[2:] # remove the leading "0b" _UpperCAmelCase : Dict = max(len(a_ ), len(a_ ) ) return "0b" + "".join( str(int(char_a == "1" and char_b == "1" ) ) for char_a, char_b in zip(a_binary.zfill(a_ ), b_binary.zfill(a_ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
17
1
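The bitwise-AND snippet in this row zero-pads two binary strings and compares them character by character. Python's built-in & operator gives the same result without the string bookkeeping; a minimal sketch:

def binary_and(a: int, b: int) -> str:
    # Bitwise AND via the built-in operator; returns a "0b"-prefixed string.
    if a < 0 or b < 0:
        raise ValueError("both inputs must be non-negative")
    return bin(a & b)

assert binary_and(25, 19) == "0b10001"  # 25 & 19 == 17

The only observable difference from the string-based version is the width of the output: & does not preserve leading zeros, which rarely matters for integer results.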
'''simple docstring''' import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin __a = '\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n' class A__ ( unittest.TestCase , UpperCamelCase ): """simple docstring""" def _lowerCAmelCase ( self : str ) -> Any: """simple docstring""" _UpperCAmelCase : Optional[int] = load_tool("text-question-answering" ) self.tool.setup() _UpperCAmelCase : Optional[int] = load_tool("text-question-answering" , remote=lowerCAmelCase__ ) def _lowerCAmelCase ( self : int ) -> Tuple: """simple docstring""" _UpperCAmelCase : str = self.tool(lowerCAmelCase__ , "What did Hugging Face do in April 2021?" ) self.assertEqual(lowerCAmelCase__ , "launched the BigScience Research Workshop" ) def _lowerCAmelCase ( self : Optional[Any] ) -> Any: """simple docstring""" _UpperCAmelCase : List[Any] = self.remote_tool(lowerCAmelCase__ , "What did Hugging Face do in April 2021?" ) self.assertEqual(lowerCAmelCase__ , "launched the BigScience Research Workshop" ) def _lowerCAmelCase ( self : Union[str, Any] ) -> List[str]: """simple docstring""" _UpperCAmelCase : Optional[int] = self.tool(text=lowerCAmelCase__ , question="What did Hugging Face do in April 2021?" ) self.assertEqual(lowerCAmelCase__ , "launched the BigScience Research Workshop" ) def _lowerCAmelCase ( self : str ) -> Dict: """simple docstring""" _UpperCAmelCase : List[str] = self.remote_tool(text=lowerCAmelCase__ , question="What did Hugging Face do in April 2021?" ) self.assertEqual(lowerCAmelCase__ , "launched the BigScience Research Workshop" )
17
'''simple docstring''' from collections.abc import Callable from math import pi, sqrt from random import uniform from statistics import mean def __UpperCAmelCase ( a_: int ): # A local function to see if a dot lands in the circle. def is_in_circle(a_: float, a_: float ) -> bool: _UpperCAmelCase : Optional[Any] = sqrt((x**2) + (y**2) ) # Our circle has a radius of 1, so a distance # greater than 1 would land outside the circle. return distance_from_centre <= 1 # The proportion of guesses that landed in the circle _UpperCAmelCase : str = mean( int(is_in_circle(uniform(-1.0, 1.0 ), uniform(-1.0, 1.0 ) ) ) for _ in range(a_ ) ) # The ratio of the area for circle to square is pi/4. _UpperCAmelCase : Optional[int] = proportion * 4 print(f"""The estimated value of pi is {pi_estimate}""" ) print(f"""The numpy value of pi is {pi}""" ) print(f"""The total error is {abs(pi - pi_estimate )}""" ) def __UpperCAmelCase ( a_: int, a_: Callable[[float], float], a_: float = 0.0, a_: float = 1.0, ): return mean( function_to_integrate(uniform(a_, a_ ) ) for _ in range(a_ ) ) * (max_value - min_value) def __UpperCAmelCase ( a_: int, a_: float = 0.0, a_: float = 1.0 ): def identity_function(a_: float ) -> float: return x _UpperCAmelCase : Union[str, Any] = area_under_curve_estimator( a_, a_, a_, a_ ) _UpperCAmelCase : List[str] = (max_value * max_value - min_value * min_value) / 2 print("******************" ) print(f"""Estimating area under y=x where x varies from {min_value} to {max_value}""" ) print(f"""Estimated value is {estimated_value}""" ) print(f"""Expected value is {expected_value}""" ) print(f"""Total error is {abs(estimated_value - expected_value )}""" ) print("******************" ) def __UpperCAmelCase ( a_: int ): def function_to_integrate(a_: float ) -> float: return sqrt(4.0 - x * x ) _UpperCAmelCase : List[str] = area_under_curve_estimator( a_, a_, 0.0, 2.0 ) print("******************" ) print("Estimating pi using area_under_curve_estimator" ) print(f"""Estimated value is {estimated_value}""" ) print(f"""Expected value is {pi}""" ) print(f"""Total error is {abs(estimated_value - pi )}""" ) print("******************" ) if __name__ == "__main__": import doctest doctest.testmod()
17
1
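A condensed, seeded version of the Monte Carlo pi estimator in this row: sample uniform points in the square [-1, 1]^2, count the fraction inside the unit circle, and scale by 4 (the circle-to-square area ratio is pi/4). The seed parameter is an addition for reproducibility, not part of the original.

import random
from math import pi

def estimate_pi(num_samples: int, seed: int = 0) -> float:
    # Fraction of uniform points in [-1, 1]^2 inside the unit circle, times 4.
    rng = random.Random(seed)
    inside = sum(
        rng.uniform(-1.0, 1.0) ** 2 + rng.uniform(-1.0, 1.0) ** 2 <= 1.0
        for _ in range(num_samples)
    )
    return 4 * inside / num_samples

print(estimate_pi(100_000), "vs", pi)  # roughly 3.14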
'''simple docstring''' import time import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria, StoppingCriteriaList, validate_stopping_criteria, ) @require_torch class A__ ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : List[Any] , lowerCAmelCase__ : Tuple ) -> str: """simple docstring""" _UpperCAmelCase : Dict = 3 _UpperCAmelCase : Optional[int] = 2_5_0 _UpperCAmelCase : Union[str, Any] = ids_tensor((batch_size, length) , lowerCAmelCase__ ) _UpperCAmelCase : List[Any] = torch.ones((batch_size, length) , device=lowerCAmelCase__ , dtype=torch.float ) / length return input_ids, scores def _lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase : Tuple = self._get_tensors(5 ) _UpperCAmelCase : Any = StoppingCriteriaList( [ MaxLengthCriteria(max_length=1_0 ), MaxTimeCriteria(max_time=0.1 ), ] ) self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) _UpperCAmelCase , _UpperCAmelCase : Tuple = self._get_tensors(9 ) self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = self._get_tensors(1_0 ) self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) def _lowerCAmelCase ( self : int ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase : str = MaxLengthCriteria(max_length=1_0 ) _UpperCAmelCase , _UpperCAmelCase : Optional[int] = self._get_tensors(5 ) self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) _UpperCAmelCase , _UpperCAmelCase : List[Any] = self._get_tensors(9 ) self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self._get_tensors(1_0 ) self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) def _lowerCAmelCase ( self : str ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase : Any = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 ) _UpperCAmelCase , _UpperCAmelCase : Tuple = self._get_tensors(5 ) self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self._get_tensors(9 ) self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) _UpperCAmelCase , _UpperCAmelCase : int = self._get_tensors(1_0 ) self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) _UpperCAmelCase : Union[str, Any] = StoppingCriteriaList([criteria] ) self.assertEqual(criteria_list.max_length , 1_0 ) def _lowerCAmelCase ( self : Optional[Any] ) -> Tuple: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self._get_tensors(5 ) _UpperCAmelCase : Tuple = MaxTimeCriteria(max_time=0.1 ) self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) _UpperCAmelCase : Optional[int] = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 ) self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) def _lowerCAmelCase ( self : Any ) -> Tuple: """simple docstring""" validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_0 ) with self.assertWarns(lowerCAmelCase__ ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_1 ) _UpperCAmelCase : Union[str, Any] = validate_stopping_criteria(StoppingCriteriaList() , 1_1 ) 
self.assertEqual(len(lowerCAmelCase__ ) , 1 )
17
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) __a = { 'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'], 'processing_layoutlmv2': ['LayoutLMv2Processor'], 'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['LayoutLMv2TokenizerFast'] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['LayoutLMv2FeatureExtractor'] __a = ['LayoutLMv2ImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST', 'LayoutLMv2ForQuestionAnswering', 'LayoutLMv2ForSequenceClassification', 'LayoutLMv2ForTokenClassification', 'LayoutLMv2Layer', 'LayoutLMv2Model', 'LayoutLMv2PreTrainedModel', ] if TYPE_CHECKING: from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaLayer, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
17
1
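The stopping-criteria tests earlier in this row check callables that decide when generation should halt. A hypothetical, minimal stand-in for a max-length criterion (not the transformers class itself) illustrates the contract: called with the current ids, return True once the sequence dimension reaches the limit.

import numpy as np

class MaxLengthStop:
    # Hypothetical stand-in: stop once input_ids reaches max_length tokens.
    def __init__(self, max_length: int):
        self.max_length = max_length

    def __call__(self, input_ids) -> bool:
        return input_ids.shape[-1] >= self.max_length

stop = MaxLengthStop(10)
print(stop(np.ones((3, 5))), stop(np.ones((3, 10))))  # False True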
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionSAGPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class A__ ( UpperCamelCase , UpperCamelCase , unittest.TestCase ): """simple docstring""" UpperCamelCase_ : Optional[int] = StableDiffusionSAGPipeline UpperCamelCase_ : List[str] = TEXT_TO_IMAGE_PARAMS UpperCamelCase_ : List[Any] = TEXT_TO_IMAGE_BATCH_PARAMS UpperCamelCase_ : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS UpperCamelCase_ : Any = TEXT_TO_IMAGE_IMAGE_PARAMS UpperCamelCase_ : Dict = False def _lowerCAmelCase ( self : str ) -> List[str]: """simple docstring""" torch.manual_seed(0 ) _UpperCAmelCase : List[Any] = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , ) _UpperCAmelCase : List[str] = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=lowerCAmelCase__ , set_alpha_to_one=lowerCAmelCase__ , ) torch.manual_seed(0 ) _UpperCAmelCase : Union[str, Any] = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) torch.manual_seed(0 ) _UpperCAmelCase : Optional[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) _UpperCAmelCase : List[str] = CLIPTextModel(lowerCAmelCase__ ) _UpperCAmelCase : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) _UpperCAmelCase : str = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def _lowerCAmelCase ( self : List[str] , lowerCAmelCase__ : int , lowerCAmelCase__ : Union[str, Any]=0 ) -> Any: """simple docstring""" if str(lowerCAmelCase__ ).startswith("mps" ): _UpperCAmelCase : int = torch.manual_seed(lowerCAmelCase__ ) else: _UpperCAmelCase : Dict = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ ) _UpperCAmelCase : Tuple = { "prompt": ".", "generator": generator, "num_inference_steps": 2, "guidance_scale": 1.0, "sag_scale": 1.0, "output_type": "numpy", } return inputs def _lowerCAmelCase ( self : Any ) -> str: """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class A__ ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : Dict ) -> Optional[int]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCAmelCase ( self : Dict ) -> List[Any]: """simple docstring""" _UpperCAmelCase : str = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" ) _UpperCAmelCase : Any = 
sag_pipe.to(lowerCAmelCase__ ) sag_pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) _UpperCAmelCase : int = "." _UpperCAmelCase : List[Any] = torch.manual_seed(0 ) _UpperCAmelCase : List[Any] = sag_pipe( [prompt] , generator=lowerCAmelCase__ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=2_0 , output_type="np" ) _UpperCAmelCase : Tuple = output.images _UpperCAmelCase : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) _UpperCAmelCase : Optional[Any] = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2 def _lowerCAmelCase ( self : Any ) -> Dict: """simple docstring""" _UpperCAmelCase : Optional[int] = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" ) _UpperCAmelCase : Dict = sag_pipe.to(lowerCAmelCase__ ) sag_pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) _UpperCAmelCase : Dict = "." _UpperCAmelCase : Optional[int] = torch.manual_seed(0 ) _UpperCAmelCase : Optional[int] = sag_pipe( [prompt] , generator=lowerCAmelCase__ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=2_0 , output_type="np" ) _UpperCAmelCase : Union[str, Any] = output.images _UpperCAmelCase : int = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) _UpperCAmelCase : Optional[int] = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2 def _lowerCAmelCase ( self : List[Any] ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase : Union[str, Any] = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" ) _UpperCAmelCase : Any = sag_pipe.to(lowerCAmelCase__ ) sag_pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) _UpperCAmelCase : Optional[int] = "." _UpperCAmelCase : Tuple = torch.manual_seed(0 ) _UpperCAmelCase : str = sag_pipe( [prompt] , width=7_6_8 , height=5_1_2 , generator=lowerCAmelCase__ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=2_0 , output_type="np" , ) _UpperCAmelCase : Union[str, Any] = output.images assert image.shape == (1, 5_1_2, 7_6_8, 3)
'''simple docstring''' def __UpperCAmelCase ( a_: int, a_: int ): if not isinstance(a_, a_ ): raise ValueError("iterations must be defined as integers" ) if not isinstance(a_, a_ ) or not number >= 1: raise ValueError("starting number must be an integer and be more than 0" ) if not iterations >= 1: raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" ) _UpperCAmelCase : List[str] = "" while number <= iterations: if number % 3 == 0: out += "Fizz" if number % 5 == 0: out += "Buzz" if 0 not in (number % 3, number % 5): out += str(a_ ) # print(out) number += 1 out += " " return out if __name__ == "__main__": import doctest doctest.testmod()
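# A hedged usage sketch for the FizzBuzz helper above, called under the
# obfuscated name it is defined with; the arguments are (starting number,
# iterations). The expected return value is shown in the trailing comment.
print(__UpperCAmelCase(1, 15))
# -> "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz " (trailing space included)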
'''simple docstring''' def __UpperCAmelCase ( a_: int = 10, a_: int = 1_000, a_: bool = True ): assert ( isinstance(a_, a_ ) and isinstance(a_, a_ ) and isinstance(a_, a_ ) ), "Invalid type of value(s) specified to function!" if min_val > max_val: raise ValueError("Invalid value for min_val or max_val (min_val must be less than max_val)" ) return min_val if option else max_val def __UpperCAmelCase ( a_: int, a_: int ): return int((number_a + number_a) / 2 ) def __UpperCAmelCase ( a_: int, a_: int, a_: int ): assert ( isinstance(a_, a_ ) and isinstance(a_, a_ ) and isinstance(a_, a_ ) ), 'argument values must be type of "int"' if lower > higher: raise ValueError("argument values for lower and higher must satisfy lower < higher" ) if not lower < to_guess < higher: raise ValueError("guess value must be within the range of lower and higher value" ) def answer(a_: int ) -> str: if number > to_guess: return "high" elif number < to_guess: return "low" else: return "same" print("started..." ) _UpperCAmelCase : Tuple = lower _UpperCAmelCase : Dict = higher _UpperCAmelCase : Tuple = [] while True: _UpperCAmelCase : Dict = get_avg(a_, a_ ) last_numbers.append(a_ ) if answer(a_ ) == "low": _UpperCAmelCase : Union[str, Any] = number elif answer(a_ ) == "high": _UpperCAmelCase : Optional[int] = number else: break print(f"""guess the number : {last_numbers[-1]}""" ) print(f"""details : {last_numbers!s}""" ) def __UpperCAmelCase ( ): _UpperCAmelCase : Union[str, Any] = int(input("Enter lower value : " ).strip() ) _UpperCAmelCase : Dict = int(input("Enter high value : " ).strip() ) _UpperCAmelCase : Any = int(input("Enter value to guess : " ).strip() ) guess_the_number(a_, a_, a_ ) if __name__ == "__main__": main()
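# A hedged, non-interactive sketch of the bisection loop above, bypassing the
# input() prompts in main(); the bounds and the target value are illustrative.
# get_avg halves the [lower, higher] window each round, so for (0, 1000, 17)
# the midpoints tried are 500, 250, 125, 62, 31, 15, 23, 19 and finally 17.
guess_the_number(0, 1_000, 17)  # prints "guess the number : 17" plus the history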
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional, Union import datasets import numpy as np import torch from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.trainer_utils import get_last_checkpoint from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.31.0') __a = logging.getLogger(__name__) @dataclass class A__ : """simple docstring""" UpperCamelCase_ : str = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) UpperCamelCase_ : Optional[str] = field( default=UpperCamelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) UpperCamelCase_ : Optional[str] = field( default=UpperCamelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) UpperCamelCase_ : Optional[str] = field( default=UpperCamelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) UpperCamelCase_ : bool = field( default=UpperCamelCase , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , ) UpperCamelCase_ : str = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) UpperCamelCase_ : bool = field( default=UpperCamelCase , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) @dataclass class A__ : """simple docstring""" UpperCamelCase_ : Optional[str] = field(default=UpperCamelCase , metadata={'''help''': '''The input training data file (a text file).'''} ) UpperCamelCase_ : Optional[str] = field( default=UpperCamelCase , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , ) UpperCamelCase_ : bool = field( default=UpperCamelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) UpperCamelCase_ : Optional[int] = field( default=UpperCamelCase , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , ) UpperCamelCase_ : Optional[int] = field( default=UpperCamelCase , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. If passed, sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) UpperCamelCase_ : bool = field( default=UpperCamelCase , metadata={ '''help''': ( '''Whether to pad all samples to the maximum sentence length. ''' '''If False, will pad the samples dynamically when batching to the maximum length in the batch. 
More ''' '''efficient on GPU but very bad for TPU.''' ) } , ) UpperCamelCase_ : Optional[int] = field( default=UpperCamelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) UpperCamelCase_ : Optional[int] = field( default=UpperCamelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def _lowerCAmelCase ( self : Any ) -> Any: """simple docstring""" if self.train_file is not None: _UpperCAmelCase : List[Any] = self.train_file.split("." )[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: _UpperCAmelCase : List[str] = self.validation_file.split("." )[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." @dataclass class A__ : """simple docstring""" UpperCamelCase_ : PreTrainedTokenizerBase UpperCamelCase_ : Union[bool, str, PaddingStrategy] = True UpperCamelCase_ : Optional[int] = None UpperCamelCase_ : Optional[int] = None def __call__( self : List[Any] , lowerCAmelCase__ : List[str] ) -> List[str]: """simple docstring""" _UpperCAmelCase : int = "label" if "label" in features[0].keys() else "labels" _UpperCAmelCase : Dict = [feature.pop(lowerCAmelCase__ ) for feature in features] _UpperCAmelCase : str = len(lowerCAmelCase__ ) _UpperCAmelCase : int = len(features[0]["input_ids"] ) _UpperCAmelCase : str = [ [{k: v[i] for k, v in feature.items()} for i in range(lowerCAmelCase__ )] for feature in features ] _UpperCAmelCase : List[str] = list(chain(*lowerCAmelCase__ ) ) _UpperCAmelCase : Any = self.tokenizer.pad( lowerCAmelCase__ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , ) # Un-flatten _UpperCAmelCase : Any = {k: v.view(lowerCAmelCase__ , lowerCAmelCase__ , -1 ) for k, v in batch.items()} # Add back labels _UpperCAmelCase : List[str] = torch.tensor(lowerCAmelCase__ , dtype=torch.intaa ) return batch def __UpperCAmelCase ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _UpperCAmelCase : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_swag", a_, a_ ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout )], ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() _UpperCAmelCase : Optional[int] = training_args.get_process_log_level() logger.setLevel(a_ ) datasets.utils.logging.set_verbosity(a_ ) transformers.utils.logging.set_verbosity(a_ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(f"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. _UpperCAmelCase : Any = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _UpperCAmelCase : Any = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. """ "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.train_file is not None or data_args.validation_file is not None: _UpperCAmelCase : Union[str, Any] = {} if data_args.train_file is not None: _UpperCAmelCase : str = data_args.train_file if data_args.validation_file is not None: _UpperCAmelCase : Optional[Any] = data_args.validation_file _UpperCAmelCase : Dict = data_args.train_file.split("." )[-1] _UpperCAmelCase : Optional[int] = load_dataset( a_, data_files=a_, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) else: # Downloading and loading the swag dataset from the hub. _UpperCAmelCase : Dict = load_dataset( "swag", "regular", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_UpperCAmelCase : Any = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) _UpperCAmelCase : Any = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) _UpperCAmelCase : str = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path ), config=a_, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) # When using your own dataset or a different dataset from swag, you will probably need to change this. _UpperCAmelCase : Optional[Any] = [f"""ending{i}""" for i in range(4 )] _UpperCAmelCase : List[Any] = "sent1" _UpperCAmelCase : Optional[int] = "sent2" if data_args.max_seq_length is None: _UpperCAmelCase : List[str] = tokenizer.model_max_length if max_seq_length > 1_024: logger.warning( "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value" " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can" " override this default with `--block_size xxx`." ) _UpperCAmelCase : Dict = 1_024 else: if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the""" f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" ) _UpperCAmelCase : Dict = min(data_args.max_seq_length, tokenizer.model_max_length ) # Preprocessing the datasets. 
def preprocess_function(a_: Union[str, Any] ): _UpperCAmelCase : Optional[int] = [[context] * 4 for context in examples[context_name]] _UpperCAmelCase : Tuple = examples[question_header_name] _UpperCAmelCase : Optional[Any] = [ [f"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(a_ ) ] # Flatten out _UpperCAmelCase : List[str] = list(chain(*a_ ) ) _UpperCAmelCase : Dict = list(chain(*a_ ) ) # Tokenize _UpperCAmelCase : List[Any] = tokenizer( a_, a_, truncation=a_, max_length=a_, padding="max_length" if data_args.pad_to_max_length else False, ) # Un-flatten return {k: [v[i : i + 4] for i in range(0, len(a_ ), 4 )] for k, v in tokenized_examples.items()} if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset" ) _UpperCAmelCase : int = raw_datasets["train"] if data_args.max_train_samples is not None: _UpperCAmelCase : Optional[Any] = min(len(a_ ), data_args.max_train_samples ) _UpperCAmelCase : List[Any] = train_dataset.select(range(a_ ) ) with training_args.main_process_first(desc="train dataset map pre-processing" ): _UpperCAmelCase : Union[str, Any] = train_dataset.map( a_, batched=a_, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, ) if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset" ) _UpperCAmelCase : Dict = raw_datasets["validation"] if data_args.max_eval_samples is not None: _UpperCAmelCase : int = min(len(a_ ), data_args.max_eval_samples ) _UpperCAmelCase : List[str] = eval_dataset.select(range(a_ ) ) with training_args.main_process_first(desc="validation dataset map pre-processing" ): _UpperCAmelCase : Optional[int] = eval_dataset.map( a_, batched=a_, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, ) # Data collator _UpperCAmelCase : Tuple = ( default_data_collator if data_args.pad_to_max_length else DataCollatorForMultipleChoice(tokenizer=a_, pad_to_multiple_of=8 if training_args.fpaa else None ) ) # Metric def compute_metrics(a_: Tuple ): _UpperCAmelCase , _UpperCAmelCase : Tuple = eval_predictions _UpperCAmelCase : Union[str, Any] = np.argmax(a_, axis=1 ) return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()} # Initialize our Trainer _UpperCAmelCase : Any = Trainer( model=a_, args=a_, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=a_, data_collator=a_, compute_metrics=a_, ) # Training if training_args.do_train: _UpperCAmelCase : Optional[Any] = None if training_args.resume_from_checkpoint is not None: _UpperCAmelCase : List[Any] = training_args.resume_from_checkpoint elif last_checkpoint is not None: _UpperCAmelCase : List[str] = last_checkpoint _UpperCAmelCase : Any = trainer.train(resume_from_checkpoint=a_ ) trainer.save_model() # Saves the tokenizer too for easy upload _UpperCAmelCase : str = train_result.metrics _UpperCAmelCase : List[str] = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(a_ ) ) _UpperCAmelCase : Union[str, Any] = min(a_, len(a_ ) ) trainer.log_metrics("train", a_ ) trainer.save_metrics("train", a_ ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***" ) _UpperCAmelCase : List[Any] = trainer.evaluate() _UpperCAmelCase : int = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(a_ ) 
_UpperCAmelCase : Tuple = min(a_, len(a_ ) ) trainer.log_metrics("eval", a_ ) trainer.save_metrics("eval", a_ ) _UpperCAmelCase : int = { "finetuned_from": model_args.model_name_or_path, "tasks": "multiple-choice", "dataset_tags": "swag", "dataset_args": "regular", "dataset": "SWAG", "language": "en", } if training_args.push_to_hub: trainer.push_to_hub(**a_ ) else: trainer.create_model_card(**a_ ) def __UpperCAmelCase ( a_: int ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
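# A hedged command-line sketch for launching the multiple-choice fine-tuning
# script above; the model id, hyperparameters, and output path are illustrative
# placeholders, while the flags themselves map onto the ModelArguments,
# DataTrainingArguments, and TrainingArguments dataclasses the script parses.
#
#   python run_swag.py \
#     --model_name_or_path bert-base-uncased \
#     --do_train \
#     --do_eval \
#     --learning_rate 5e-5 \
#     --num_train_epochs 3 \
#     --max_seq_length 128 \
#     --per_device_train_batch_size 16 \
#     --output_dir /tmp/swag_base \
#     --overwrite_output_dir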
'''simple docstring''' from __future__ import annotations import time __a = list[tuple[int, int]] __a = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] __a = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right class A__ : """simple docstring""" def __init__( self : Union[str, Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : Node | None ) -> int: """simple docstring""" _UpperCAmelCase : str = pos_x _UpperCAmelCase : List[str] = pos_y _UpperCAmelCase : Any = (pos_y, pos_x) _UpperCAmelCase : Dict = goal_x _UpperCAmelCase : int = goal_y _UpperCAmelCase : Union[str, Any] = parent class A__ : """simple docstring""" def __init__( self : Dict , lowerCAmelCase__ : tuple[int, int] , lowerCAmelCase__ : tuple[int, int] ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase : str = Node(start[1] , start[0] , goal[1] , goal[0] , lowerCAmelCase__ ) _UpperCAmelCase : Optional[Any] = Node(goal[1] , goal[0] , goal[1] , goal[0] , lowerCAmelCase__ ) _UpperCAmelCase : List[str] = [self.start] _UpperCAmelCase : str = False def _lowerCAmelCase ( self : str ) -> Path | None: """simple docstring""" while self.node_queue: _UpperCAmelCase : List[str] = self.node_queue.pop(0 ) if current_node.pos == self.target.pos: _UpperCAmelCase : Tuple = True return self.retrace_path(lowerCAmelCase__ ) _UpperCAmelCase : Tuple = self.get_successors(lowerCAmelCase__ ) for node in successors: self.node_queue.append(lowerCAmelCase__ ) if not self.reached: return [self.start.pos] return None def _lowerCAmelCase ( self : List[Any] , lowerCAmelCase__ : Node ) -> list[Node]: """simple docstring""" _UpperCAmelCase : Any = [] for action in delta: _UpperCAmelCase : Union[str, Any] = parent.pos_x + action[1] _UpperCAmelCase : Optional[int] = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCAmelCase__ ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node(lowerCAmelCase__ , lowerCAmelCase__ , self.target.pos_y , self.target.pos_x , lowerCAmelCase__ ) ) return successors def _lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase__ : Node | None ) -> Path: """simple docstring""" _UpperCAmelCase : List[Any] = node _UpperCAmelCase : Any = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) _UpperCAmelCase : str = current_node.parent path.reverse() return path class A__ : """simple docstring""" def __init__( self : Any , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any ) -> Any: """simple docstring""" _UpperCAmelCase : Dict = BreadthFirstSearch(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : List[Any] = BreadthFirstSearch(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : str = False def _lowerCAmelCase ( self : List[str] ) -> Path | None: """simple docstring""" while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue: _UpperCAmelCase : Any = self.fwd_bfs.node_queue.pop(0 ) _UpperCAmelCase : List[str] = self.bwd_bfs.node_queue.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: _UpperCAmelCase : Tuple = True return self.retrace_bidirectional_path( lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : List[str] = current_bwd_node _UpperCAmelCase : int = current_fwd_node _UpperCAmelCase : Optional[Any] = { self.fwd_bfs: self.fwd_bfs.get_successors(lowerCAmelCase__ ), self.bwd_bfs: 
self.bwd_bfs.get_successors(lowerCAmelCase__ ), } for bfs in [self.fwd_bfs, self.bwd_bfs]: for node in successors[bfs]: bfs.node_queue.append(lowerCAmelCase__ ) if not self.reached: return [self.fwd_bfs.start.pos] return None def _lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase__ : Node , lowerCAmelCase__ : Node ) -> Path: """simple docstring""" _UpperCAmelCase : Union[str, Any] = self.fwd_bfs.retrace_path(lowerCAmelCase__ ) _UpperCAmelCase : List[str] = self.bwd_bfs.retrace_path(lowerCAmelCase__ ) bwd_path.pop() bwd_path.reverse() _UpperCAmelCase : Dict = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] import doctest doctest.testmod() __a = (0, 0) __a = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) __a = time.time() __a = BreadthFirstSearch(init, goal) __a = bfs.search() __a = time.time() - start_bfs_time print('Unidirectional BFS computation time : ', bfs_time) __a = time.time() __a = BidirectionalBreadthFirstSearch(init, goal) __a = bd_bfs.search() __a = time.time() - start_bd_bfs_time print('Bidirectional BFS computation time : ', bd_bfs_time)
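# A hedged sketch of driving both searchers above programmatically, mirroring
# the __main__ block: coordinates are (y, x) pairs on the module-level `grid`,
# and the start/goal corners below are illustrative.
start, goal = (0, 0), (len(grid) - 1, len(grid[0]) - 1)
print(BreadthFirstSearch(start, goal).search())               # unidirectional path
print(BidirectionalBreadthFirstSearch(start, goal).search())  # meets in the middle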
'''simple docstring''' import argparse import pytorch_lightning as pl import torch from torch import nn from transformers import LongformerForQuestionAnswering, LongformerModel class A__ ( pl.LightningModule ): """simple docstring""" def __init__( self : Any , lowerCAmelCase__ : Optional[Any] ) -> str: """simple docstring""" super().__init__() _UpperCAmelCase : List[str] = model _UpperCAmelCase : Dict = 2 _UpperCAmelCase : Tuple = nn.Linear(self.model.config.hidden_size , self.num_labels ) def _lowerCAmelCase ( self : Tuple ) -> int: """simple docstring""" pass def __UpperCAmelCase ( a_: str, a_: str, a_: str ): # load longformer model from model identifier _UpperCAmelCase : int = LongformerModel.from_pretrained(a_ ) _UpperCAmelCase : Any = LightningModel(a_ ) _UpperCAmelCase : int = torch.load(a_, map_location=torch.device("cpu" ) ) lightning_model.load_state_dict(ckpt["state_dict"] ) # init longformer question answering model _UpperCAmelCase : List[str] = LongformerForQuestionAnswering.from_pretrained(a_ ) # transfer weights longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() ) longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() ) longformer_for_qa.eval() # save model longformer_for_qa.save_pretrained(a_ ) print(f"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( '--longformer_model', default=None, type=str, required=True, help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.', ) parser.add_argument( '--longformer_question_answering_ckpt_path', default=None, type=str, required=True, help='Path the official PyTorch Lightning Checkpoint.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) __a = parser.parse_args() convert_longformer_qa_checkpoint_to_pytorch( args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path )
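# A hedged command-line sketch for the conversion entry point above; the script
# file name, checkpoint path, and output directory are illustrative assumptions.
#
#   python convert_longformer_qa_checkpoint_to_pytorch.py \
#     --longformer_model longformer-base-4096 \
#     --longformer_question_answering_ckpt_path ./checkpoints/longformer_qa.ckpt \
#     --pytorch_dump_folder_path ./longformer_qa_converted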