Dataset schema (one record per sample below):
- code: string, length 86 to 54.5k
- code_codestyle: int64, 0 to 371
- style_context: string, length 87 to 49.2k
- style_context_codestyle: int64, 0 to 349
- label: int64, 0 to 1
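Each record below pairs a code sample with its integer codestyle and label values (the bare numbers printed between samples). A minimal sketch of how a table with this schema could be inspected, assuming it is published as a Hugging Face dataset; the repository id used here is hypothetical:

from datasets import load_dataset

# Hypothetical repository id -- substitute the real dataset name.
ds = load_dataset("user/code-codestyle-pairs", split="train")

row = ds[0]
print(len(row["code"]))                # string, length 86 to 54.5k
print(row["code_codestyle"])           # int64, 0 to 371
print(len(row["style_context"]))       # string, length 87 to 49.2k
print(row["style_context_codestyle"])  # int64, 0 to 349
print(row["label"])                    # int64, 0 or 1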
"""simple docstring""" import time from dataclasses import dataclass from multiprocessing import Pool from unittest import TestCase from unittest.mock import patch import multiprocess import numpy as np import pytest from datasets.utils.py_utils import ( NestedDataStructure, asdict, iflatmap_unordered, map_nested, temp_seed, temporary_assignment, zip_dict, ) from .utils import require_tf, require_torch def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): # picklable for multiprocessing return x.sum() def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): # picklable for multiprocessing return i + 1 @dataclass class UpperCamelCase : """simple docstring""" SCREAMING_SNAKE_CASE_ : int SCREAMING_SNAKE_CASE_ : str class UpperCamelCase ( snake_case ): """simple docstring""" def lowerCamelCase__ ( self ): _lowercase : Union[str, Any] = {} _lowercase : Any = [] _lowercase : Dict = 1 _lowercase : Optional[int] = [1, 2] _lowercase : int = {"""a""": 1, """b""": 2} _lowercase : Tuple = {"""a""": [1, 2], """b""": [3, 4]} _lowercase : str = {"""a""": {"""1""": 1}, """b""": 2} _lowercase : List[Any] = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4} _lowercase : Any = {} _lowercase : Dict = [] _lowercase : List[str] = 2 _lowercase : Union[str, Any] = [2, 3] _lowercase : int = {"""a""": 2, """b""": 3} _lowercase : Any = {"""a""": [2, 3], """b""": [4, 5]} _lowercase : Union[str, Any] = {"""a""": {"""1""": 2}, """b""": 3} _lowercase : List[str] = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5} self.assertEqual(map_nested(UpperCAmelCase_ ,UpperCAmelCase_ ) ,UpperCAmelCase_ ) self.assertEqual(map_nested(UpperCAmelCase_ ,UpperCAmelCase_ ) ,UpperCAmelCase_ ) self.assertEqual(map_nested(UpperCAmelCase_ ,UpperCAmelCase_ ) ,UpperCAmelCase_ ) self.assertEqual(map_nested(UpperCAmelCase_ ,UpperCAmelCase_ ) ,UpperCAmelCase_ ) self.assertEqual(map_nested(UpperCAmelCase_ ,UpperCAmelCase_ ) ,UpperCAmelCase_ ) self.assertEqual(map_nested(UpperCAmelCase_ ,UpperCAmelCase_ ) ,UpperCAmelCase_ ) self.assertEqual(map_nested(UpperCAmelCase_ ,UpperCAmelCase_ ) ,UpperCAmelCase_ ) self.assertEqual(map_nested(UpperCAmelCase_ ,UpperCAmelCase_ ) ,UpperCAmelCase_ ) _lowercase : List[str] = 2 self.assertEqual(map_nested(UpperCAmelCase_ ,UpperCAmelCase_ ,num_proc=UpperCAmelCase_ ) ,UpperCAmelCase_ ) self.assertEqual(map_nested(UpperCAmelCase_ ,UpperCAmelCase_ ,num_proc=UpperCAmelCase_ ) ,UpperCAmelCase_ ) self.assertEqual(map_nested(UpperCAmelCase_ ,UpperCAmelCase_ ,num_proc=UpperCAmelCase_ ) ,UpperCAmelCase_ ) self.assertEqual(map_nested(UpperCAmelCase_ ,UpperCAmelCase_ ,num_proc=UpperCAmelCase_ ) ,UpperCAmelCase_ ) self.assertEqual(map_nested(UpperCAmelCase_ ,UpperCAmelCase_ ,num_proc=UpperCAmelCase_ ) ,UpperCAmelCase_ ) self.assertEqual(map_nested(UpperCAmelCase_ ,UpperCAmelCase_ ,num_proc=UpperCAmelCase_ ) ,UpperCAmelCase_ ) self.assertEqual(map_nested(UpperCAmelCase_ ,UpperCAmelCase_ ,num_proc=UpperCAmelCase_ ) ,UpperCAmelCase_ ) self.assertEqual(map_nested(UpperCAmelCase_ ,UpperCAmelCase_ ,num_proc=UpperCAmelCase_ ) ,UpperCAmelCase_ ) _lowercase : Tuple = {"""a""": np.eye(2 ), """b""": np.zeros(3 ), """c""": np.ones(2 )} _lowercase : List[Any] = {"""a""": 2, """b""": 0, """c""": 2} _lowercase : Optional[int] = { """a""": np.eye(2 ).astype(UpperCAmelCase_ ), """b""": np.zeros(3 ).astype(UpperCAmelCase_ ), """c""": np.ones(2 ).astype(UpperCAmelCase_ ), } self.assertEqual(map_nested(UpperCAmelCase_ ,UpperCAmelCase_ ,map_numpy=UpperCAmelCase_ ) ,UpperCAmelCase_ ) self.assertEqual( {k: v.tolist() for k, v in map_nested(UpperCAmelCase_ 
,UpperCAmelCase_ ,map_numpy=UpperCAmelCase_ ).items()} ,{k: v.tolist() for k, v in expected_map_nested_sna_int.items()} ,) self.assertEqual(map_nested(UpperCAmelCase_ ,UpperCAmelCase_ ,map_numpy=UpperCAmelCase_ ,num_proc=UpperCAmelCase_ ) ,UpperCAmelCase_ ) self.assertEqual( {k: v.tolist() for k, v in map_nested(UpperCAmelCase_ ,UpperCAmelCase_ ,map_numpy=UpperCAmelCase_ ,num_proc=UpperCAmelCase_ ).items()} ,{k: v.tolist() for k, v in expected_map_nested_sna_int.items()} ,) with self.assertRaises(UpperCAmelCase_ ): # can't pickle a local lambda map_nested(lambda UpperCAmelCase_ : x + 1 ,UpperCAmelCase_ ,num_proc=UpperCAmelCase_ ) def lowerCamelCase__ ( self ): _lowercase : Dict = {"""a""": 1, """b""": 2} _lowercase : Any = {"""a""": 3, """b""": 4} _lowercase : Optional[Any] = {"""a""": 5, """b""": 6} _lowercase : Any = sorted([("""a""", (1, 3, 5)), ("""b""", (2, 4, 6))] ) self.assertEqual(sorted(zip_dict(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ) ) ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ): class UpperCamelCase : """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = "bar" _lowercase : Union[str, Any] = Foo() self.assertEqual(foo.my_attr ,"""bar""" ) with temporary_assignment(UpperCAmelCase_ ,"""my_attr""" ,"""BAR""" ): self.assertEqual(foo.my_attr ,"""BAR""" ) self.assertEqual(foo.my_attr ,"""bar""" ) @pytest.mark.parametrize( """iterable_length, num_proc, expected_num_proc""" , [ (1, None, 1), (1, 1, 1), (2, None, 1), (2, 1, 1), (2, 2, 1), (2, 3, 1), (3, 2, 1), (16, 16, 16), (16, 17, 16), (17, 16, 16), ] , ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): with patch("""datasets.utils.py_utils._single_map_nested""" ) as mock_single_map_nested, patch( """datasets.parallel.parallel.Pool""" ) as mock_multiprocessing_pool: _lowercase : List[Any] = {F"""{i}""": i for i in range(__UpperCAmelCase )} _lowercase : Optional[Any] = map_nested(lambda __UpperCAmelCase : x + 10 , __UpperCAmelCase , num_proc=__UpperCAmelCase , parallel_min_length=16 ) if expected_num_proc == 1: assert mock_single_map_nested.called assert not mock_multiprocessing_pool.called else: assert not mock_single_map_nested.called assert mock_multiprocessing_pool.called assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc class UpperCamelCase ( snake_case ): """simple docstring""" @require_tf def lowerCamelCase__ ( self ): import tensorflow as tf from tensorflow.keras import layers _lowercase : Optional[Any] = layers.Dense(2 ) def gen_random_output(): _lowercase : str = tf.random.uniform((1, 3) ) return model(UpperCAmelCase_ ).numpy() with temp_seed(42 ,set_tensorflow=UpperCAmelCase_ ): _lowercase : List[str] = gen_random_output() with temp_seed(42 ,set_tensorflow=UpperCAmelCase_ ): _lowercase : Any = gen_random_output() _lowercase : str = gen_random_output() np.testing.assert_equal(UpperCAmelCase_ ,UpperCAmelCase_ ) self.assertGreater(np.abs(outa - outa ).sum() ,0 ) @require_torch def lowerCamelCase__ ( self ): import torch def gen_random_output(): _lowercase : Any = torch.nn.Linear(3 ,2 ) _lowercase : str = torch.rand(1 ,3 ) return model(UpperCAmelCase_ ).detach().numpy() with temp_seed(42 ,set_pytorch=UpperCAmelCase_ ): _lowercase : Optional[int] = gen_random_output() with temp_seed(42 ,set_pytorch=UpperCAmelCase_ ): _lowercase : List[Any] = gen_random_output() _lowercase : List[str] = gen_random_output() np.testing.assert_equal(UpperCAmelCase_ ,UpperCAmelCase_ ) self.assertGreater(np.abs(outa - outa ).sum() ,0 ) def lowerCamelCase__ ( self ): def 
gen_random_output(): return np.random.rand(1 ,3 ) with temp_seed(42 ): _lowercase : Dict = gen_random_output() with temp_seed(42 ): _lowercase : Optional[Any] = gen_random_output() _lowercase : Any = gen_random_output() np.testing.assert_equal(UpperCAmelCase_ ,UpperCAmelCase_ ) self.assertGreater(np.abs(outa - outa ).sum() ,0 ) @pytest.mark.parametrize("""input_data""" , [{}] ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase : Dict = NestedDataStructure(__UpperCAmelCase ).data assert output_data == input_data @pytest.mark.parametrize( """data, expected_output""" , [ ({}, []), ([], []), ("""foo""", ["""foo"""]), (["""foo""", """bar"""], ["""foo""", """bar"""]), ([["""foo""", """bar"""]], ["""foo""", """bar"""]), ([[["""foo"""], ["""bar"""]]], ["""foo""", """bar"""]), ([[["""foo"""], """bar"""]], ["""foo""", """bar"""]), ({"""a""": 1, """b""": 2}, [1, 2]), ({"""a""": [1, 2], """b""": [3, 4]}, [1, 2, 3, 4]), ({"""a""": [[1, 2]], """b""": [[3, 4]]}, [1, 2, 3, 4]), ({"""a""": [[1, 2]], """b""": [3, 4]}, [1, 2, 3, 4]), ({"""a""": [[[1], [2]]], """b""": [[[3], [4]]]}, [1, 2, 3, 4]), ({"""a""": [[[1], [2]]], """b""": [[3, 4]]}, [1, 2, 3, 4]), ({"""a""": [[[1], [2]]], """b""": [3, 4]}, [1, 2, 3, 4]), ({"""a""": [[[1], [2]]], """b""": [3, [4]]}, [1, 2, 3, 4]), ({"""a""": {"""1""": 1}, """b""": 2}, [1, 2]), ({"""a""": {"""1""": [1]}, """b""": 2}, [1, 2]), ({"""a""": {"""1""": [1]}, """b""": [2]}, [1, 2]), ] , ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ): _lowercase : Dict = NestedDataStructure(__UpperCAmelCase ).flatten() assert output == expected_output def __SCREAMING_SNAKE_CASE ( ): _lowercase : int = A(x=1 , y="""foobar""" ) _lowercase : Optional[int] = {"""x""": 1, """y""": """foobar"""} assert asdict(__UpperCAmelCase ) == expected_output _lowercase : Optional[Any] = {"""a""": {"""b""": A(x=10 , y="""foo""" )}, """c""": [A(x=20 , y="""bar""" )]} _lowercase : Optional[Any] = {"""a""": {"""b""": {"""x""": 10, """y""": """foo"""}}, """c""": [{"""x""": 20, """y""": """bar"""}]} assert asdict(__UpperCAmelCase ) == expected_output with pytest.raises(__UpperCAmelCase ): asdict([1, A(x=10 , y="""foo""" )] ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): return text.split() def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): yield (time.time(), content) time.sleep(2 ) yield (time.time(), content) def __SCREAMING_SNAKE_CASE ( ): with Pool(2 ) as pool: _lowercase : Dict = list(iflatmap_unordered(__UpperCAmelCase , _split_text , kwargs_iterable=[{"""text""": """hello there"""}] * 10 ) ) assert out.count("""hello""" ) == 10 assert out.count("""there""" ) == 10 assert len(__UpperCAmelCase ) == 20 # check multiprocess from pathos (uses dill for pickling) with multiprocess.Pool(2 ) as pool: _lowercase : Optional[int] = list(iflatmap_unordered(__UpperCAmelCase , _split_text , kwargs_iterable=[{"""text""": """hello there"""}] * 10 ) ) assert out.count("""hello""" ) == 10 assert out.count("""there""" ) == 10 assert len(__UpperCAmelCase ) == 20 # check that we get items as fast as possible with Pool(2 ) as pool: _lowercase : Any = [] for yield_time, content in iflatmap_unordered( __UpperCAmelCase , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"""content""": """a"""}, {"""content""": """b"""}] ): assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded" out.append(__UpperCAmelCase ) assert out.count("""a""" ) == 2 assert out.count("""b""" ) == 2 assert len(__UpperCAmelCase ) == 4
336
"""simple docstring""" import pprint import requests UpperCAmelCase: Tuple = """https://zenquotes.io/api""" def __SCREAMING_SNAKE_CASE ( ): return requests.get(API_ENDPOINT_URL + """/today""" ).json() def __SCREAMING_SNAKE_CASE ( ): return requests.get(API_ENDPOINT_URL + """/random""" ).json() if __name__ == "__main__": UpperCAmelCase: int = random_quotes() pprint.pprint(response)
336
1
"""simple docstring""" from abc import ABC, abstractmethod from argparse import ArgumentParser class UpperCamelCase ( snake_case ): """simple docstring""" @staticmethod @abstractmethod def lowerCamelCase__ ( UpperCAmelCase_ ): raise NotImplementedError() @abstractmethod def lowerCamelCase__ ( self ): raise NotImplementedError()
336
"""simple docstring""" from __future__ import annotations from typing import TypedDict class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : str SCREAMING_SNAKE_CASE_ : int def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise TypeError("""The parameter s type must be str.""" ) return [s[i:] + s[:i] for i in range(len(__UpperCAmelCase ) )] def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise TypeError("""The parameter s type must be str.""" ) if not s: raise ValueError("""The parameter s must not be empty.""" ) _lowercase : Tuple = all_rotations(__UpperCAmelCase ) rotations.sort() # sort the list of rotations in alphabetically order # make a string composed of the last char of each rotation _lowercase : BWTTransformDict = { "bwt_string": "".join([word[-1] for word in rotations] ), "idx_original_string": rotations.index(__UpperCAmelCase ), } return response def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ): if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise TypeError("""The parameter bwt_string type must be str.""" ) if not bwt_string: raise ValueError("""The parameter bwt_string must not be empty.""" ) try: _lowercase : Optional[Any] = int(__UpperCAmelCase ) except ValueError: raise TypeError( """The parameter idx_original_string type must be int or passive""" """ of cast to int.""" ) if idx_original_string < 0: raise ValueError("""The parameter idx_original_string must not be lower than 0.""" ) if idx_original_string >= len(__UpperCAmelCase ): raise ValueError( """The parameter idx_original_string must be lower than""" """ len(bwt_string).""" ) _lowercase : int = [""""""] * len(__UpperCAmelCase ) for _ in range(len(__UpperCAmelCase ) ): for i in range(len(__UpperCAmelCase ) ): _lowercase : Union[str, Any] = bwt_string[i] + ordered_rotations[i] ordered_rotations.sort() return ordered_rotations[idx_original_string] if __name__ == "__main__": UpperCAmelCase: Optional[int] = """Provide a string that I will generate its BWT transform: """ UpperCAmelCase: int = input(entry_msg).strip() UpperCAmelCase: List[str] = bwt_transform(s) print( F'Burrows Wheeler transform for string \'{s}\' results ' F'in \'{result["bwt_string"]}\'' ) UpperCAmelCase: Union[str, Any] = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""]) print( F'Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' ' F'we get original string \'{original_string}\'' )
336
1
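For reference, the Burrows-Wheeler record above can be sanity-checked with a compact, self-contained round-trip. This is an illustrative sketch, independent of the obfuscated identifiers in the record; the function names here are hypothetical:

def bwt(s: str) -> tuple[str, int]:
    # Sort all rotations; the transform is the last column plus the
    # index of the original string among the sorted rotations.
    rotations = sorted(s[i:] + s[:i] for i in range(len(s)))
    return "".join(rot[-1] for rot in rotations), rotations.index(s)

def inverse_bwt(last_column: str, idx: int) -> str:
    # Rebuild the sorted rotation table by repeatedly prepending the
    # last column and re-sorting, then read off the row at `idx`.
    table = [""] * len(last_column)
    for _ in range(len(last_column)):
        table = sorted(last_column[i] + table[i] for i in range(len(last_column)))
    return table[idx]

encoded, idx = bwt("banana")
assert encoded == "nnbaaa" and inverse_bwt(encoded, idx) == "banana"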
"""simple docstring""" import os import string import sys UpperCAmelCase: Dict = 1 << 8 UpperCAmelCase: List[str] = { """tab""": ord("""\t"""), """newline""": ord("""\r"""), """esc""": 27, """up""": 65 + ARROW_KEY_FLAG, """down""": 66 + ARROW_KEY_FLAG, """right""": 67 + ARROW_KEY_FLAG, """left""": 68 + ARROW_KEY_FLAG, """mod_int""": 91, """undefined""": sys.maxsize, """interrupt""": 3, """insert""": 50, """delete""": 51, """pg_up""": 53, """pg_down""": 54, } UpperCAmelCase: List[Any] = KEYMAP["""up"""] UpperCAmelCase: List[Any] = KEYMAP["""left"""] if sys.platform == "win32": UpperCAmelCase: Any = [] UpperCAmelCase: Tuple = { b"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG, b"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG, b"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG, b"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG, b"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG, b"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG, b"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG, b"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG, } for i in range(10): UpperCAmelCase: Union[str, Any] = ord(str(i)) def __SCREAMING_SNAKE_CASE ( ): if os.name == "nt": import msvcrt _lowercase : str = """mbcs""" # Flush the keyboard buffer while msvcrt.kbhit(): msvcrt.getch() if len(__UpperCAmelCase ) == 0: # Read the keystroke _lowercase : Optional[Any] = msvcrt.getch() # If it is a prefix char, get second part if ch in (b"\x00", b"\xe0"): _lowercase : Dict = ch + msvcrt.getch() # Translate actual Win chars to bullet char types try: _lowercase : List[Any] = chr(WIN_KEYMAP[cha] ) WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) ) WIN_CH_BUFFER.append(__UpperCAmelCase ) if ord(__UpperCAmelCase ) in ( KEYMAP["insert"] - 1 << 9, KEYMAP["delete"] - 1 << 9, KEYMAP["pg_up"] - 1 << 9, KEYMAP["pg_down"] - 1 << 9, ): WIN_CH_BUFFER.append(chr(126 ) ) _lowercase : Tuple = chr(KEYMAP["""esc"""] ) except KeyError: _lowercase : Union[str, Any] = cha[1] else: _lowercase : List[str] = ch.decode(__UpperCAmelCase ) else: _lowercase : Optional[Any] = WIN_CH_BUFFER.pop(0 ) elif os.name == "posix": import termios import tty _lowercase : List[str] = sys.stdin.fileno() _lowercase : List[str] = termios.tcgetattr(__UpperCAmelCase ) try: tty.setraw(__UpperCAmelCase ) _lowercase : List[str] = sys.stdin.read(1 ) finally: termios.tcsetattr(__UpperCAmelCase , termios.TCSADRAIN , __UpperCAmelCase ) return ch def __SCREAMING_SNAKE_CASE ( ): _lowercase : Any = get_raw_chars() if ord(__UpperCAmelCase ) in [KEYMAP["interrupt"], KEYMAP["newline"]]: return char elif ord(__UpperCAmelCase ) == KEYMAP["esc"]: _lowercase : int = get_raw_chars() if ord(__UpperCAmelCase ) == KEYMAP["mod_int"]: _lowercase : str = get_raw_chars() if ord(__UpperCAmelCase ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(__UpperCAmelCase ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG: return chr(ord(__UpperCAmelCase ) + ARROW_KEY_FLAG ) else: return KEYMAP["undefined"] else: return get_raw_chars() else: if char in string.printable: return char else: return KEYMAP["undefined"]
336
"""simple docstring""" from __future__ import annotations from itertools import permutations from random import randint from timeit import repeat def __SCREAMING_SNAKE_CASE ( ): _lowercase : Dict = [randint(-1000 , 1000 ) for i in range(10 )] _lowercase : Tuple = randint(-5000 , 5000 ) return (arr, r) UpperCAmelCase: int = make_dataset() def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ): for triplet in permutations(__UpperCAmelCase , 3 ): if sum(__UpperCAmelCase ) == target: return tuple(sorted(__UpperCAmelCase ) ) return (0, 0, 0) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ): arr.sort() _lowercase : Optional[Any] = len(__UpperCAmelCase ) for i in range(n - 1 ): _lowercase , _lowercase : str = i + 1, n - 1 while left < right: if arr[i] + arr[left] + arr[right] == target: return (arr[i], arr[left], arr[right]) elif arr[i] + arr[left] + arr[right] < target: left += 1 elif arr[i] + arr[left] + arr[right] > target: right -= 1 return (0, 0, 0) def __SCREAMING_SNAKE_CASE ( ): _lowercase : Tuple = """ from __main__ import dataset, triplet_sum1, triplet_sum2 """ _lowercase : Union[str, Any] = """ triplet_sum1(*dataset) """ _lowercase : Union[str, Any] = """ triplet_sum2(*dataset) """ _lowercase : Dict = repeat(setup=__UpperCAmelCase , stmt=__UpperCAmelCase , repeat=5 , number=10000 ) _lowercase : Any = repeat(setup=__UpperCAmelCase , stmt=__UpperCAmelCase , repeat=5 , number=10000 ) return (min(__UpperCAmelCase ), min(__UpperCAmelCase )) if __name__ == "__main__": from doctest import testmod testmod() UpperCAmelCase: Any = solution_times() print(F'The time for naive implementation is {times[0]}.') print(F'The time for optimized implementation is {times[1]}.')
336
1
"""simple docstring""" import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase: Union[str, Any] = logging.get_logger(__name__) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): print("""Loading config file...""" ) def flatten_yaml_as_dict(__UpperCAmelCase , __UpperCAmelCase="" , __UpperCAmelCase="." ): _lowercase : Dict = [] for k, v in d.items(): _lowercase : str = parent_key + sep + k if parent_key else k if isinstance(__UpperCAmelCase , collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(__UpperCAmelCase , __UpperCAmelCase , sep=__UpperCAmelCase ).items() ) else: items.append((new_key, v) ) return dict(__UpperCAmelCase ) _lowercase : Any = argparse.Namespace() with open(__UpperCAmelCase , """r""" ) as yaml_file: try: _lowercase : List[str] = yaml.load(__UpperCAmelCase , Loader=yaml.FullLoader ) _lowercase : int = flatten_yaml_as_dict(__UpperCAmelCase ) for k, v in flat_cfg.items(): setattr(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) except yaml.YAMLError as exc: logger.error("""Error while loading config file: {}. Error message: {}""".format(__UpperCAmelCase , str(__UpperCAmelCase ) ) ) return config def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ): _lowercase : int = MobileViTVaConfig() _lowercase : Dict = False # dataset if task_name.startswith("""imagenet1k_""" ): _lowercase : str = 1000 if int(task_name.strip().split("""_""" )[-1] ) == 384: _lowercase : Union[str, Any] = 384 else: _lowercase : Any = 256 _lowercase : Optional[int] = """imagenet-1k-id2label.json""" elif task_name.startswith("""imagenet21k_to_1k_""" ): _lowercase : List[Any] = 21000 if int(task_name.strip().split("""_""" )[-1] ) == 384: _lowercase : Optional[Any] = 384 else: _lowercase : Tuple = 256 _lowercase : Dict = """imagenet-22k-id2label.json""" elif task_name.startswith("""ade20k_""" ): _lowercase : Optional[Any] = 151 _lowercase : Tuple = 512 _lowercase : Optional[int] = """ade20k-id2label.json""" _lowercase : int = True elif task_name.startswith("""voc_""" ): _lowercase : List[Any] = 21 _lowercase : Tuple = 512 _lowercase : int = """pascal-voc-id2label.json""" _lowercase : int = True # orig_config _lowercase : List[str] = load_orig_config_file(__UpperCAmelCase ) assert getattr(__UpperCAmelCase , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model" _lowercase : Dict = getattr(__UpperCAmelCase , """model.classification.mitv2.width_multiplier""" , 1.0 ) assert ( getattr(__UpperCAmelCase , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" _lowercase : int = getattr(__UpperCAmelCase , """model.classification.activation.name""" , """swish""" ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: _lowercase : Optional[Any] = getattr(__UpperCAmelCase , """model.segmentation.output_stride""" , 16 ) if "_deeplabv3" in task_name: _lowercase : Tuple = getattr(__UpperCAmelCase , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] ) _lowercase : Union[str, Any] = getattr(__UpperCAmelCase , """model.segmentation.deeplabv3.aspp_out_channels""" , 512 ) _lowercase : str = 
getattr(__UpperCAmelCase , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 ) # id2label _lowercase : List[str] = """huggingface/label-files""" _lowercase : List[Any] = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type="""dataset""" ) , """r""" ) ) _lowercase : Optional[int] = {int(__UpperCAmelCase ): v for k, v in idalabel.items()} _lowercase : int = idalabel _lowercase : str = {v: k for k, v in idalabel.items()} return config def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): _lowercase : List[str] = dct.pop(__UpperCAmelCase ) _lowercase : Optional[int] = val def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase=False ): if base_model: _lowercase : Dict = """""" else: _lowercase : Union[str, Any] = """mobilevitv2.""" _lowercase : str = [] for k in state_dict.keys(): if k[:8] == "encoder.": _lowercase : Any = k[8:] else: _lowercase : int = k if ".block." in k: _lowercase : List[Any] = k_new.replace(""".block.""" , """.""" ) if ".conv." in k: _lowercase : Optional[int] = k_new.replace(""".conv.""" , """.convolution.""" ) if ".norm." in k: _lowercase : Any = k_new.replace(""".norm.""" , """.normalization.""" ) if "conv_1." in k: _lowercase : Any = k_new.replace("""conv_1.""" , F"""{model_prefix}conv_stem.""" ) for i in [1, 2]: if F"""layer_{i}.""" in k: _lowercase : int = k_new.replace(F"""layer_{i}.""" , F"""{model_prefix}encoder.layer.{i-1}.layer.""" ) if ".exp_1x1." in k: _lowercase : str = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" ) if ".red_1x1." in k: _lowercase : Optional[int] = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" ) for i in [3, 4, 5]: if F"""layer_{i}.0.""" in k: _lowercase : Any = k_new.replace(F"""layer_{i}.0.""" , F"""{model_prefix}encoder.layer.{i-1}.downsampling_layer.""" ) if F"""layer_{i}.1.local_rep.0.""" in k: _lowercase : Dict = k_new.replace(F"""layer_{i}.1.local_rep.0.""" , F"""{model_prefix}encoder.layer.{i-1}.conv_kxk.""" ) if F"""layer_{i}.1.local_rep.1.""" in k: _lowercase : Tuple = k_new.replace(F"""layer_{i}.1.local_rep.1.""" , F"""{model_prefix}encoder.layer.{i-1}.conv_1x1.""" ) for i in [3, 4, 5]: if i == 3: _lowercase : List[Any] = [0, 1] elif i == 4: _lowercase : Tuple = [0, 1, 2, 3] elif i == 5: _lowercase : List[str] = [0, 1, 2] for j in j_in: if F"""layer_{i}.1.global_rep.{j}.""" in k: _lowercase : Union[str, Any] = k_new.replace( F"""layer_{i}.1.global_rep.{j}.""" , F"""{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.""" ) if F"""layer_{i}.1.global_rep.{j+1}.""" in k: _lowercase : int = k_new.replace( F"""layer_{i}.1.global_rep.{j+1}.""" , F"""{model_prefix}encoder.layer.{i-1}.layernorm.""" ) if F"""layer_{i}.1.conv_proj.""" in k: _lowercase : Union[str, Any] = k_new.replace(F"""layer_{i}.1.conv_proj.""" , F"""{model_prefix}encoder.layer.{i-1}.conv_projection.""" ) if "pre_norm_attn.0." in k: _lowercase : str = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" ) if "pre_norm_attn.1." in k: _lowercase : Tuple = k_new.replace("""pre_norm_attn.1.""" , """attention.""" ) if "pre_norm_ffn.0." in k: _lowercase : Tuple = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" ) if "pre_norm_ffn.1." in k: _lowercase : int = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" ) if "pre_norm_ffn.3." in k: _lowercase : Union[str, Any] = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" ) if "classifier.1." in k: _lowercase : Optional[Any] = k_new.replace("""classifier.1.""" , """classifier.""" ) if "seg_head." 
in k: _lowercase : Optional[int] = k_new.replace("""seg_head.""" , """segmentation_head.""" ) if ".aspp_layer." in k: _lowercase : Dict = k_new.replace(""".aspp_layer.""" , """.""" ) if ".aspp_pool." in k: _lowercase : int = k_new.replace(""".aspp_pool.""" , """.""" ) rename_keys.append((k, k_new) ) return rename_keys def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase : Optional[int] = [] for k in state_dict.keys(): if k.startswith("""seg_head.aux_head.""" ): keys_to_ignore.append(__UpperCAmelCase ) for k in keys_to_ignore: state_dict.pop(__UpperCAmelCase , __UpperCAmelCase ) def __SCREAMING_SNAKE_CASE ( ): _lowercase : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg""" # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" _lowercase : List[Any] = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw ) return im @torch.no_grad() def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): _lowercase : int = get_mobilevitva_config(__UpperCAmelCase , __UpperCAmelCase ) # load original state_dict _lowercase : Tuple = torch.load(__UpperCAmelCase , map_location="""cpu""" ) # load huggingface model if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ): _lowercase : Dict = MobileViTVaForSemanticSegmentation(__UpperCAmelCase ).eval() _lowercase : Tuple = False else: _lowercase : int = MobileViTVaForImageClassification(__UpperCAmelCase ).eval() _lowercase : str = False # remove and rename some keys of load the original model _lowercase : Optional[int] = checkpoint remove_unused_keys(__UpperCAmelCase ) _lowercase : str = create_rename_keys(__UpperCAmelCase , base_model=__UpperCAmelCase ) for rename_key_src, rename_key_dest in rename_keys: rename_key(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # load modified state_dict model.load_state_dict(__UpperCAmelCase ) # Check outputs on an image, prepared by MobileViTImageProcessor _lowercase : int = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) _lowercase : List[Any] = image_processor(images=prepare_img() , return_tensors="""pt""" ) _lowercase : List[Any] = model(**__UpperCAmelCase ) # verify classification model if task_name.startswith("""imagenet""" ): _lowercase : Tuple = outputs.logits _lowercase : Dict = logits.argmax(-1 ).item() print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] ) if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0: # expected_logits for base variant _lowercase : Any = torch.tensor([-1.6_3_3_6E0_0, -7.3_2_0_4E-0_2, -5.1_8_8_3E-0_1] ) assert torch.allclose(logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) Path(__UpperCAmelCase ).mkdir(exist_ok=__UpperCAmelCase ) print(F"""Saving model {task_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(__UpperCAmelCase ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(__UpperCAmelCase ) if __name__ == "__main__": UpperCAmelCase: Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( """--task""", default="""imagenet1k_256""", type=str, help=( """Name of the task for which the MobileViTV2 model you'd like to convert is trained on . 
""" """ Classification (ImageNet-1k) - MobileViTV2 (256x256) : imagenet1k_256 - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384 - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) : imagenet21k_to_1k_256 - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on ImageNet-1k 384x384) : imagenet21k_to_1k_384 Segmentation - ADE20K Dataset : ade20k_deeplabv3 - Pascal VOC 2012 Dataset: voc_deeplabv3 """ ), choices=[ """imagenet1k_256""", """imagenet1k_384""", """imagenet21k_to_1k_256""", """imagenet21k_to_1k_384""", """ade20k_deeplabv3""", """voc_deeplabv3""", ], ) parser.add_argument( """--orig_checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file).""" ) parser.add_argument("""--orig_config_path""", required=True, type=str, help="""Path to the original config file.""") parser.add_argument( """--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory.""" ) UpperCAmelCase: str = parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
336
"""simple docstring""" import os from typing import List, Optional, Union from ...image_processing_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType from ..auto import AutoTokenizer class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = ["image_processor", "tokenizer"] SCREAMING_SNAKE_CASE_ : Optional[int] = "BlipImageProcessor" SCREAMING_SNAKE_CASE_ : Tuple = "AutoTokenizer" def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ): super().__init__(UpperCAmelCase_ ,UpperCAmelCase_ ) # add QFormer tokenizer _lowercase : Optional[int] = qformer_tokenizer def __call__( self ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = True ,UpperCAmelCase_ = False ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = 0 ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,): if images is None and text is None: raise ValueError("""You have to specify at least images or text.""" ) _lowercase : List[Any] = BatchFeature() if text is not None: _lowercase : List[str] = self.tokenizer( text=UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,max_length=UpperCAmelCase_ ,stride=UpperCAmelCase_ ,pad_to_multiple_of=UpperCAmelCase_ ,return_attention_mask=UpperCAmelCase_ ,return_overflowing_tokens=UpperCAmelCase_ ,return_special_tokens_mask=UpperCAmelCase_ ,return_offsets_mapping=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ ,return_length=UpperCAmelCase_ ,verbose=UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ,**UpperCAmelCase_ ,) encoding.update(UpperCAmelCase_ ) _lowercase : Dict = self.qformer_tokenizer( text=UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,max_length=UpperCAmelCase_ ,stride=UpperCAmelCase_ ,pad_to_multiple_of=UpperCAmelCase_ ,return_attention_mask=UpperCAmelCase_ ,return_overflowing_tokens=UpperCAmelCase_ ,return_special_tokens_mask=UpperCAmelCase_ ,return_offsets_mapping=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ ,return_length=UpperCAmelCase_ ,verbose=UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ,**UpperCAmelCase_ ,) _lowercase : str = qformer_text_encoding.pop("""input_ids""" ) _lowercase : int = qformer_text_encoding.pop("""attention_mask""" ) if images is not None: _lowercase : Optional[Any] = self.image_processor(UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ) encoding.update(UpperCAmelCase_ ) return encoding def lowerCamelCase__ ( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): return self.tokenizer.batch_decode(*UpperCAmelCase_ ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): return self.tokenizer.decode(*UpperCAmelCase_ ,**UpperCAmelCase_ ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def lowerCamelCase__ ( self ): _lowercase : Union[str, Any] = self.tokenizer.model_input_names _lowercase : int = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,**UpperCAmelCase_ ): if 
os.path.isfile(UpperCAmelCase_ ): raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" ) os.makedirs(UpperCAmelCase_ ,exist_ok=UpperCAmelCase_ ) _lowercase : Any = os.path.join(UpperCAmelCase_ ,"""qformer_tokenizer""" ) self.qformer_tokenizer.save_pretrained(UpperCAmelCase_ ) return super().save_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ ) @classmethod def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ): _lowercase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCAmelCase_ ,subfolder="""qformer_tokenizer""" ) _lowercase : Any = cls._get_arguments_from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ ) args.append(UpperCAmelCase_ ) return cls(*UpperCAmelCase_ )
336
1
"""simple docstring""" import warnings from ...utils import logging from .image_processing_perceiver import PerceiverImageProcessor UpperCAmelCase: Optional[int] = logging.get_logger(__name__) class UpperCamelCase ( snake_case ): """simple docstring""" def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): warnings.warn( """The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use PerceiverImageProcessor instead.""" ,UpperCAmelCase_ ,) super().__init__(*UpperCAmelCase_ ,**UpperCAmelCase_ )
336
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging UpperCAmelCase: Tuple = logging.get_logger(__name__) UpperCAmelCase: List[Any] = { """CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": ( """https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json""" ), # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer } class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = "trajectory_transformer" SCREAMING_SNAKE_CASE_ : Optional[int] = ["past_key_values"] SCREAMING_SNAKE_CASE_ : Tuple = { "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self ,UpperCAmelCase_=1_00 ,UpperCAmelCase_=5 ,UpperCAmelCase_=1 ,UpperCAmelCase_=1 ,UpperCAmelCase_=2_49 ,UpperCAmelCase_=6 ,UpperCAmelCase_=17 ,UpperCAmelCase_=25 ,UpperCAmelCase_=4 ,UpperCAmelCase_=4 ,UpperCAmelCase_=1_28 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.0006 ,UpperCAmelCase_=5_12 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=1E-12 ,UpperCAmelCase_=1 ,UpperCAmelCase_=True ,UpperCAmelCase_=1 ,UpperCAmelCase_=5_02_56 ,UpperCAmelCase_=5_02_56 ,**UpperCAmelCase_ ,): _lowercase : Dict = vocab_size _lowercase : List[str] = action_weight _lowercase : int = reward_weight _lowercase : List[Any] = value_weight _lowercase : List[str] = max_position_embeddings _lowercase : Any = block_size _lowercase : Any = action_dim _lowercase : List[str] = observation_dim _lowercase : Union[str, Any] = transition_dim _lowercase : str = learning_rate _lowercase : Tuple = n_layer _lowercase : Optional[int] = n_head _lowercase : List[str] = n_embd _lowercase : List[str] = embd_pdrop _lowercase : Optional[Any] = attn_pdrop _lowercase : List[Any] = resid_pdrop _lowercase : str = initializer_range _lowercase : Optional[Any] = layer_norm_eps _lowercase : List[Any] = kaiming_initializer_range _lowercase : List[Any] = use_cache super().__init__(pad_token_id=UpperCAmelCase_ ,bos_token_id=UpperCAmelCase_ ,eos_token_id=UpperCAmelCase_ ,**UpperCAmelCase_ )
336
1
"""simple docstring""" def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase : list[list[float]] = [] for data in source_data: for i, el in enumerate(__UpperCAmelCase ): if len(__UpperCAmelCase ) < i + 1: data_lists.append([] ) data_lists[i].append(float(__UpperCAmelCase ) ) return data_lists def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ): _lowercase : list[list[float]] = [] for dlist, weight in zip(__UpperCAmelCase , __UpperCAmelCase ): _lowercase : Dict = min(__UpperCAmelCase ) _lowercase : List[Any] = max(__UpperCAmelCase ) _lowercase : list[float] = [] # for weight 0 score is 1 - actual score if weight == 0: for item in dlist: try: score.append(1 - ((item - mind) / (maxd - mind)) ) except ZeroDivisionError: score.append(1 ) elif weight == 1: for item in dlist: try: score.append((item - mind) / (maxd - mind) ) except ZeroDivisionError: score.append(0 ) # weight not 0 or 1 else: _lowercase : List[str] = F"""Invalid weight of {weight:f} provided""" raise ValueError(__UpperCAmelCase ) score_lists.append(__UpperCAmelCase ) return score_lists def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase : list[float] = [0 for i in range(len(score_lists[0] ) )] for slist in score_lists: for j, ele in enumerate(__UpperCAmelCase ): _lowercase : List[Any] = final_scores[j] + ele return final_scores def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ): _lowercase : Any = get_data(__UpperCAmelCase ) _lowercase : Optional[Any] = calculate_each_score(__UpperCAmelCase , __UpperCAmelCase ) _lowercase : Union[str, Any] = generate_final_scores(__UpperCAmelCase ) # append scores to source data for i, ele in enumerate(__UpperCAmelCase ): source_data[i].append(__UpperCAmelCase ) return source_data
336
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING UpperCAmelCase: Any = logging.get_logger(__name__) UpperCAmelCase: List[str] = { """Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""", } class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = "instructblip_vision_model" def __init__( self ,UpperCAmelCase_=14_08 ,UpperCAmelCase_=61_44 ,UpperCAmelCase_=39 ,UpperCAmelCase_=16 ,UpperCAmelCase_=2_24 ,UpperCAmelCase_=14 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=1E-6 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=1E-10 ,UpperCAmelCase_=True ,**UpperCAmelCase_ ,): super().__init__(**UpperCAmelCase_ ) _lowercase : Optional[Any] = hidden_size _lowercase : Tuple = intermediate_size _lowercase : List[Any] = num_hidden_layers _lowercase : Tuple = num_attention_heads _lowercase : Optional[Any] = patch_size _lowercase : Optional[Any] = image_size _lowercase : Union[str, Any] = initializer_range _lowercase : Optional[Any] = attention_dropout _lowercase : List[Any] = layer_norm_eps _lowercase : Optional[int] = hidden_act _lowercase : Tuple = qkv_bias @classmethod def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ): cls._set_token_in_kwargs(UpperCAmelCase_ ) _lowercase , _lowercase : List[Any] = cls.get_config_dict(UpperCAmelCase_ ,**UpperCAmelCase_ ) # get the vision config dict if we are loading from InstructBlipConfig if config_dict.get("""model_type""" ) == "instructblip": _lowercase : int = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(UpperCAmelCase_ ,**UpperCAmelCase_ ) class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = "instructblip_qformer" def __init__( self ,UpperCAmelCase_=3_05_22 ,UpperCAmelCase_=7_68 ,UpperCAmelCase_=12 ,UpperCAmelCase_=12 ,UpperCAmelCase_=30_72 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=5_12 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=1E-12 ,UpperCAmelCase_=0 ,UpperCAmelCase_="absolute" ,UpperCAmelCase_=2 ,UpperCAmelCase_=14_08 ,**UpperCAmelCase_ ,): super().__init__(pad_token_id=UpperCAmelCase_ ,**UpperCAmelCase_ ) _lowercase : List[Any] = vocab_size _lowercase : List[Any] = hidden_size _lowercase : str = num_hidden_layers _lowercase : List[str] = num_attention_heads _lowercase : Optional[Any] = hidden_act _lowercase : int = intermediate_size _lowercase : Union[str, Any] = hidden_dropout_prob _lowercase : Optional[Any] = attention_probs_dropout_prob _lowercase : List[Any] = max_position_embeddings _lowercase : Tuple = initializer_range _lowercase : Optional[int] = layer_norm_eps _lowercase : Any = position_embedding_type _lowercase : Dict = cross_attention_frequency _lowercase : Optional[Any] = encoder_hidden_size @classmethod def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ): cls._set_token_in_kwargs(UpperCAmelCase_ ) _lowercase , _lowercase : Dict = cls.get_config_dict(UpperCAmelCase_ ,**UpperCAmelCase_ ) # get the qformer config dict if we are loading from InstructBlipConfig if config_dict.get("""model_type""" ) == "instructblip": _lowercase : str = config_dict["""qformer_config"""] if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(UpperCAmelCase_ ,**UpperCAmelCase_ ) class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = "instructblip" SCREAMING_SNAKE_CASE_ : List[str] = True def __init__( self ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=32 ,**UpperCAmelCase_ ): super().__init__(**UpperCAmelCase_ ) if vision_config is None: _lowercase : str = {} logger.info("""vision_config is None. initializing the InstructBlipVisionConfig with default values.""" ) if qformer_config is None: _lowercase : Any = {} logger.info("""qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.""" ) if text_config is None: _lowercase : Optional[int] = {} logger.info("""text_config is None. 
Initializing the text config with default values (`OPTConfig`).""" ) _lowercase : int = InstructBlipVisionConfig(**UpperCAmelCase_ ) _lowercase : Optional[int] = InstructBlipQFormerConfig(**UpperCAmelCase_ ) _lowercase : Dict = text_config["""model_type"""] if """model_type""" in text_config else """opt""" _lowercase : str = CONFIG_MAPPING[text_model_type](**UpperCAmelCase_ ) _lowercase : str = self.text_config.tie_word_embeddings _lowercase : Union[str, Any] = self.text_config.is_encoder_decoder _lowercase : List[str] = num_query_tokens _lowercase : List[str] = self.vision_config.hidden_size _lowercase : Dict = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES _lowercase : Union[str, Any] = 1.0 _lowercase : Dict = 0.02 @classmethod def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,**UpperCAmelCase_ ,): return cls( vision_config=vision_config.to_dict() ,qformer_config=qformer_config.to_dict() ,text_config=text_config.to_dict() ,**UpperCAmelCase_ ,) def lowerCamelCase__ ( self ): _lowercase : Union[str, Any] = copy.deepcopy(self.__dict__ ) _lowercase : int = self.vision_config.to_dict() _lowercase : Any = self.qformer_config.to_dict() _lowercase : Any = self.text_config.to_dict() _lowercase : Optional[int] = self.__class__.model_type return output
336
1
"""simple docstring""" from dataclasses import dataclass, field from typing import Optional from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser @dataclass class UpperCamelCase : """simple docstring""" SCREAMING_SNAKE_CASE_ : str = field( metadata={"help": "The output directory where the model will be written."} , ) SCREAMING_SNAKE_CASE_ : str = field( metadata={ "help": ( "The encoder model checkpoint for weights initialization." "Don't set if you want to train an encoder model from scratch." ) } , ) SCREAMING_SNAKE_CASE_ : str = field( metadata={ "help": ( "The decoder model checkpoint for weights initialization." "Don't set if you want to train a decoder model from scratch." ) } , ) SCREAMING_SNAKE_CASE_ : Optional[str] = field( default=snake_case , metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"} ) SCREAMING_SNAKE_CASE_ : Optional[str] = field( default=snake_case , metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"} ) def __SCREAMING_SNAKE_CASE ( ): _lowercase : List[Any] = HfArgumentParser((ModelArguments,) ) ((_lowercase) , ) : Tuple = parser.parse_args_into_dataclasses() # Load pretrained model and tokenizer # Use explicit specified encoder config if model_args.encoder_config_name: _lowercase : Tuple = AutoConfig.from_pretrained(model_args.encoder_config_name ) # Use pretrained encoder model's config else: _lowercase : Dict = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path ) # Use explicit specified decoder config if model_args.decoder_config_name: _lowercase : Any = AutoConfig.from_pretrained(model_args.decoder_config_name ) # Use pretrained decoder model's config else: _lowercase : Tuple = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path ) # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed _lowercase : str = True _lowercase : Tuple = True _lowercase : int = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained( encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=__UpperCAmelCase , decoder_config=__UpperCAmelCase , ) # GPT2 only has bos/eos tokens but not decoder_start/pad tokens _lowercase : Union[str, Any] = decoder_config.decoder_start_token_id _lowercase : str = decoder_config.pad_token_id if decoder_start_token_id is None: _lowercase : Dict = decoder_config.bos_token_id if pad_token_id is None: _lowercase : Union[str, Any] = decoder_config.eos_token_id # This is necessary to make Flax's generate() work _lowercase : List[Any] = decoder_config.eos_token_id _lowercase : Union[str, Any] = decoder_start_token_id _lowercase : Dict = pad_token_id _lowercase : List[Any] = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path ) _lowercase : List[str] = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path ) _lowercase : Dict = tokenizer.convert_ids_to_tokens(model.config.pad_token_id ) model.save_pretrained(model_args.output_dir ) image_processor.save_pretrained(model_args.output_dir ) tokenizer.save_pretrained(model_args.output_dir ) if __name__ == "__main__": main()
336
"""simple docstring""" import cva import numpy as np class UpperCamelCase : """simple docstring""" def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ): if k in (0.04, 0.06): _lowercase : Optional[Any] = k _lowercase : Optional[Any] = window_size else: raise ValueError("""invalid k value""" ) def __str__( self ): return str(self.k ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ): _lowercase : List[str] = cva.imread(UpperCAmelCase_ ,0 ) _lowercase , _lowercase : Dict = img.shape _lowercase : list[list[int]] = [] _lowercase : int = img.copy() _lowercase : List[str] = cva.cvtColor(UpperCAmelCase_ ,cva.COLOR_GRAY2RGB ) _lowercase , _lowercase : Optional[Any] = np.gradient(UpperCAmelCase_ ) _lowercase : Optional[int] = dx**2 _lowercase : Optional[Any] = dy**2 _lowercase : Optional[Any] = dx * dy _lowercase : List[str] = 0.04 _lowercase : Optional[Any] = self.window_size // 2 for y in range(UpperCAmelCase_ ,h - offset ): for x in range(UpperCAmelCase_ ,w - offset ): _lowercase : Optional[Any] = ixx[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() _lowercase : Dict = iyy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() _lowercase : Union[str, Any] = ixy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() _lowercase : int = (wxx * wyy) - (wxy**2) _lowercase : Union[str, Any] = wxx + wyy _lowercase : Union[str, Any] = det - k * (trace**2) # Can change the value if r > 0.5: corner_list.append([x, y, r] ) color_img.itemset((y, x, 0) ,0 ) color_img.itemset((y, x, 1) ,0 ) color_img.itemset((y, x, 2) ,2_55 ) return color_img, corner_list if __name__ == "__main__": UpperCAmelCase: Optional[int] = HarrisCorner(0.04, 3) UpperCAmelCase , UpperCAmelCase: List[Any] = edge_detect.detect("""path_to_image""") cva.imwrite("""detect.png""", color_img)
336
1
"""simple docstring""" UpperCAmelCase: Optional[Any] = [ """DownloadConfig""", """DownloadManager""", """DownloadMode""", """StreamingDownloadManager""", ] from .download_config import DownloadConfig from .download_manager import DownloadManager, DownloadMode from .streaming_download_manager import StreamingDownloadManager
336
"""simple docstring""" import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCamelCase ( snake_case , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = LEDTokenizer SCREAMING_SNAKE_CASE_ : List[str] = LEDTokenizerFast SCREAMING_SNAKE_CASE_ : List[str] = True def lowerCamelCase__ ( self ): super().setUp() _lowercase : Union[str, Any] = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] _lowercase : List[Any] = dict(zip(UpperCAmelCase_ ,range(len(UpperCAmelCase_ ) ) ) ) _lowercase : Optional[int] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] _lowercase : Dict = {"""unk_token""": """<unk>"""} _lowercase : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] ) _lowercase : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write(json.dumps(UpperCAmelCase_ ) + """\n""" ) with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write("""\n""".join(UpperCAmelCase_ ) ) def lowerCamelCase__ ( self ,**UpperCAmelCase_ ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,**UpperCAmelCase_ ): kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ): return "lower newer", "lower newer" @cached_property def lowerCamelCase__ ( self ): return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" ) @cached_property def lowerCamelCase__ ( self ): return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" ) @require_torch def lowerCamelCase__ ( self ): _lowercase : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] _lowercase : Any = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowercase : Tuple = tokenizer(UpperCAmelCase_ ,max_length=len(UpperCAmelCase_ ) ,padding=UpperCAmelCase_ ,return_tensors="""pt""" ) self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ ) self.assertEqual((2, 9) ,batch.input_ids.shape ) self.assertEqual((2, 9) ,batch.attention_mask.shape ) _lowercase : Optional[Any] = batch.input_ids.tolist()[0] self.assertListEqual(UpperCAmelCase_ ,UpperCAmelCase_ ) @require_torch def lowerCamelCase__ ( self ): _lowercase : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowercase : Dict = tokenizer(UpperCAmelCase_ ,padding=UpperCAmelCase_ ,return_tensors="""pt""" ) self.assertIn("""input_ids""" ,UpperCAmelCase_ ) self.assertIn("""attention_mask""" ,UpperCAmelCase_ ) self.assertNotIn("""labels""" ,UpperCAmelCase_ ) self.assertNotIn("""decoder_attention_mask""" ,UpperCAmelCase_ ) 
@require_torch def lowerCamelCase__ ( self ): _lowercase : Dict = [ """Summary of the text.""", """Another summary.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowercase : Tuple = tokenizer(text_target=UpperCAmelCase_ ,max_length=32 ,padding="""max_length""" ,return_tensors="""pt""" ) self.assertEqual(32 ,targets["""input_ids"""].shape[1] ) @require_torch def lowerCamelCase__ ( self ): for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowercase : List[Any] = tokenizer( ["""I am a small frog""" * 10_24, """I am a small frog"""] ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,return_tensors="""pt""" ) self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ ) self.assertEqual(batch.input_ids.shape ,(2, 51_22) ) @require_torch def lowerCamelCase__ ( self ): _lowercase : List[Any] = ["""A long paragraph for summarization."""] _lowercase : Dict = [ """Summary of the text.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowercase : Dict = tokenizer(UpperCAmelCase_ ,return_tensors="""pt""" ) _lowercase : List[str] = tokenizer(text_target=UpperCAmelCase_ ,return_tensors="""pt""" ) _lowercase : Union[str, Any] = inputs["""input_ids"""] _lowercase : List[str] = targets["""input_ids"""] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def lowerCamelCase__ ( self ): for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowercase : str = ["""Summary of the text.""", """Another summary."""] _lowercase : Optional[int] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] _lowercase : Any = tokenizer(UpperCAmelCase_ ,padding=UpperCAmelCase_ ) _lowercase : str = [[0] * len(UpperCAmelCase_ ) for x in encoded_output["""input_ids"""]] _lowercase : Optional[int] = tokenizer.pad(UpperCAmelCase_ ) self.assertSequenceEqual(outputs["""global_attention_mask"""] ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ): pass def lowerCamelCase__ ( self ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): _lowercase : int = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ ) _lowercase : Optional[int] = self.tokenizer_class.from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ ) _lowercase : Dict = """A, <mask> AllenNLP sentence.""" _lowercase : List[Any] = tokenizer_r.encode_plus(UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ ) _lowercase : Any = tokenizer_p.encode_plus(UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ ) self.assertEqual(sum(tokens_r["""token_type_ids"""] ) ,sum(tokens_p["""token_type_ids"""] ) ) self.assertEqual( sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) ,sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) ,) _lowercase : str = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) _lowercase : str = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) self.assertSequenceEqual(tokens_p["""input_ids"""] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) 
self.assertSequenceEqual( UpperCAmelCase_ ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( UpperCAmelCase_ ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
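# Hedged, standalone sketch (not part of the test) of what the setUp above constructs:
# a toy byte-level BPE tokenizer built from the vocab/merges literals in this file.
# Only the merges "\u0120 l", "\u0120l o", "\u0120lo w" and "e r" exist, so " lower"
# reduces to ["\u0120low", "er"] while " newer" falls back to smaller pieces.
import json
import os
import tempfile

from transformers import LEDTokenizer

tmpdir = tempfile.mkdtemp()
vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n",
         "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>"]
merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
with open(os.path.join(tmpdir, "vocab.json"), "w", encoding="utf-8") as fp:
    fp.write(json.dumps(dict(zip(vocab, range(len(vocab))))) + "\n")
with open(os.path.join(tmpdir, "merges.txt"), "w", encoding="utf-8") as fp:
    fp.write("\n".join(merges))
tok = LEDTokenizer.from_pretrained(tmpdir, unk_token="<unk>")
print(tok.tokenize(" lower newer"))  # expected: ['Ġlow', 'er', 'Ġ', 'n', 'e', 'w', 'er']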
336
1
"""simple docstring""" import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class UpperCamelCase : """simple docstring""" @staticmethod def lowerCamelCase__ ( *UpperCAmelCase_ ,**UpperCAmelCase_ ): pass @is_pipeline_test @require_vision @require_torch class UpperCamelCase ( unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ): _lowercase : Union[str, Any] = pipeline( """zero-shot-object-detection""" ,model="""hf-internal-testing/tiny-random-owlvit-object-detection""" ) _lowercase : Optional[Any] = [ { """image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""", """candidate_labels""": ["""cat""", """remote""", """couch"""], } ] return object_detector, examples def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ): _lowercase : Dict = object_detector(examples[0] ,threshold=0.0 ) _lowercase : int = len(UpperCAmelCase_ ) self.assertGreater(UpperCAmelCase_ ,0 ) self.assertEqual( UpperCAmelCase_ ,[ { """score""": ANY(UpperCAmelCase_ ), """label""": ANY(UpperCAmelCase_ ), """box""": {"""xmin""": ANY(UpperCAmelCase_ ), """ymin""": ANY(UpperCAmelCase_ ), """xmax""": ANY(UpperCAmelCase_ ), """ymax""": ANY(UpperCAmelCase_ )}, } for i in range(UpperCAmelCase_ ) ] ,) @require_tf @unittest.skip("""Zero Shot Object Detection not implemented in TF""" ) def lowerCamelCase__ ( self ): pass @require_torch def lowerCamelCase__ ( self ): _lowercase : Any = pipeline( """zero-shot-object-detection""" ,model="""hf-internal-testing/tiny-random-owlvit-object-detection""" ) _lowercase : List[Any] = object_detector( """./tests/fixtures/tests_samples/COCO/000000039769.png""" ,candidate_labels=["""cat""", """remote""", """couch"""] ,threshold=0.64 ,) self.assertEqual( nested_simplify(UpperCAmelCase_ ,decimals=4 ) ,[ {"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 2_04, """ymin""": 1_67, """xmax""": 2_32, """ymax""": 1_90}}, {"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 2_04, """ymin""": 1_67, """xmax""": 2_32, """ymax""": 1_90}}, {"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 2_04, """ymin""": 1_67, """xmax""": 2_32, """ymax""": 1_90}}, {"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 5_71, """ymin""": 83, """xmax""": 5_98, """ymax""": 1_03}}, {"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 5_71, """ymin""": 83, """xmax""": 5_98, """ymax""": 1_03}}, {"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 5_71, """ymin""": 83, """xmax""": 5_98, """ymax""": 1_03}}, {"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 4_94, """ymin""": 1_05, """xmax""": 5_21, """ymax""": 1_27}}, {"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 2_74, """xmax""": 93, """ymax""": 2_97}}, {"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 4_94, """ymin""": 1_05, """xmax""": 5_21, """ymax""": 1_27}}, ] ,) _lowercase : List[str] = object_detector( [ { """image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""", """candidate_labels""": ["""cat""", """remote""", 
"""couch"""], } ] ,threshold=0.64 ,) self.assertEqual( nested_simplify(UpperCAmelCase_ ,decimals=4 ) ,[ [ {"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 2_04, """ymin""": 1_67, """xmax""": 2_32, """ymax""": 1_90}}, {"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 2_04, """ymin""": 1_67, """xmax""": 2_32, """ymax""": 1_90}}, {"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 2_04, """ymin""": 1_67, """xmax""": 2_32, """ymax""": 1_90}}, {"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 5_71, """ymin""": 83, """xmax""": 5_98, """ymax""": 1_03}}, {"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 5_71, """ymin""": 83, """xmax""": 5_98, """ymax""": 1_03}}, {"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 5_71, """ymin""": 83, """xmax""": 5_98, """ymax""": 1_03}}, {"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 4_94, """ymin""": 1_05, """xmax""": 5_21, """ymax""": 1_27}}, {"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 2_74, """xmax""": 93, """ymax""": 2_97}}, {"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 4_94, """ymin""": 1_05, """xmax""": 5_21, """ymax""": 1_27}}, ] ] ,) @require_torch @slow def lowerCamelCase__ ( self ): _lowercase : Union[str, Any] = pipeline("""zero-shot-object-detection""" ) _lowercase : int = object_detector( """http://images.cocodataset.org/val2017/000000039769.jpg""" ,candidate_labels=["""cat""", """remote""", """couch"""] ,) self.assertEqual( nested_simplify(UpperCAmelCase_ ,decimals=4 ) ,[ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 3_24, """ymin""": 20, """xmax""": 6_40, """ymax""": 3_73}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 1_77, """ymax""": 1_15}}, {"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 3_15, """ymax""": 4_72}}, {"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 3_35, """ymin""": 74, """xmax""": 3_71, """ymax""": 1_87}}, {"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 6_42, """ymax""": 4_76}}, ] ,) _lowercase : Optional[int] = object_detector( [ { """image""": """http://images.cocodataset.org/val2017/000000039769.jpg""", """candidate_labels""": ["""cat""", """remote""", """couch"""], }, { """image""": """http://images.cocodataset.org/val2017/000000039769.jpg""", """candidate_labels""": ["""cat""", """remote""", """couch"""], }, ] ,) self.assertEqual( nested_simplify(UpperCAmelCase_ ,decimals=4 ) ,[ [ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 3_24, """ymin""": 20, """xmax""": 6_40, """ymax""": 3_73}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 1_77, """ymax""": 1_15}}, {"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 3_15, """ymax""": 4_72}}, {"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 3_35, """ymin""": 74, """xmax""": 3_71, """ymax""": 1_87}}, {"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 6_42, """ymax""": 4_76}}, ], [ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 3_24, """ymin""": 20, """xmax""": 6_40, """ymax""": 3_73}}, {"""score""": 0.277, """label""": """remote""", 
"""box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 1_77, """ymax""": 1_15}}, {"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 3_15, """ymax""": 4_72}}, {"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 3_35, """ymin""": 74, """xmax""": 3_71, """ymax""": 1_87}}, {"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 6_42, """ymax""": 4_76}}, ], ] ,) @require_tf @unittest.skip("""Zero Shot Object Detection not implemented in TF""" ) def lowerCamelCase__ ( self ): pass @require_torch @slow def lowerCamelCase__ ( self ): _lowercase : List[Any] = 0.2 _lowercase : List[str] = pipeline("""zero-shot-object-detection""" ) _lowercase : Optional[Any] = object_detector( """http://images.cocodataset.org/val2017/000000039769.jpg""" ,candidate_labels=["""cat""", """remote""", """couch"""] ,threshold=UpperCAmelCase_ ,) self.assertEqual( nested_simplify(UpperCAmelCase_ ,decimals=4 ) ,[ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 3_24, """ymin""": 20, """xmax""": 6_40, """ymax""": 3_73}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 1_77, """ymax""": 1_15}}, {"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 3_15, """ymax""": 4_72}}, ] ,) @require_torch @slow def lowerCamelCase__ ( self ): _lowercase : Any = 2 _lowercase : Dict = pipeline("""zero-shot-object-detection""" ) _lowercase : List[str] = object_detector( """http://images.cocodataset.org/val2017/000000039769.jpg""" ,candidate_labels=["""cat""", """remote""", """couch"""] ,top_k=UpperCAmelCase_ ,) self.assertEqual( nested_simplify(UpperCAmelCase_ ,decimals=4 ) ,[ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 3_24, """ymin""": 20, """xmax""": 6_40, """ymax""": 3_73}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 1_77, """ymax""": 1_15}}, ] ,)
336
"""simple docstring""" import argparse from collections import defaultdict def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): _lowercase : str = F"""{file}_{class_name}_{test_name}""" done_test[_id] += 1 with open(__UpperCAmelCase , """r""" ) as f: _lowercase : Any = f.readlines() _lowercase : Optional[int] = F"""class {class_name}(""" _lowercase : List[str] = F"""{4 * " "}def {test_name}(""" _lowercase : List[Any] = F"""{8 * " "}{correct_line.split()[0]}""" _lowercase : int = F"""{16 * " "}{correct_line.split()[0]}""" _lowercase : str = False _lowercase : Optional[Any] = False _lowercase : Union[str, Any] = False _lowercase : Any = False _lowercase : int = 0 _lowercase : Tuple = 0 _lowercase : Union[str, Any] = [] for line in lines: if line.startswith(__UpperCAmelCase ): _lowercase : List[str] = True elif in_class and line.startswith(__UpperCAmelCase ): _lowercase : str = True elif in_class and in_func and (line.startswith(__UpperCAmelCase ) or line.startswith(__UpperCAmelCase )): _lowercase : Union[str, Any] = len(line.split(correct_line.split()[0] )[0] ) count += 1 if count == done_test[_id]: _lowercase : Optional[int] = True if in_class and in_func and in_line: if ")" not in line: continue else: _lowercase : Optional[Any] = True if in_class and in_func and in_line and insert_line: new_lines.append(F"""{spaces * " "}{correct_line}""" ) _lowercase : Union[str, Any] = False else: new_lines.append(__UpperCAmelCase ) with open(__UpperCAmelCase , """w""" ) as f: for line in new_lines: f.write(__UpperCAmelCase ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase=None ): if fail is not None: with open(__UpperCAmelCase , """r""" ) as f: _lowercase : Dict = {l.strip() for l in f.readlines()} else: _lowercase : int = None with open(__UpperCAmelCase , """r""" ) as f: _lowercase : int = f.readlines() _lowercase : int = defaultdict(__UpperCAmelCase ) for line in correct_lines: _lowercase , _lowercase , _lowercase , _lowercase : int = line.split(""";""" ) if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures: overwrite_file(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) if __name__ == "__main__": UpperCAmelCase: List[Any] = argparse.ArgumentParser() parser.add_argument("""--correct_filename""", help="""filename of tests with expected result""") parser.add_argument("""--fail_filename""", help="""filename of test failures""", type=str, default=None) UpperCAmelCase: Any = parser.parse_args() main(args.correct_filename, args.fail_filename)
336
1
"""simple docstring""" import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCAmelCase: Union[str, Any] = logging.get_logger(__name__) UpperCAmelCase: Dict = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""} # See all BART models at https://huggingface.co/models?filter=bart UpperCAmelCase: Any = { """vocab_file""": { """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""", """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""", """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""", """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""", """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""", """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""", }, """merges_file""": { """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""", """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""", """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""", """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""", """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""", """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""", }, } UpperCAmelCase: List[str] = { """facebook/bart-base""": 1_024, """facebook/bart-large""": 1_024, """facebook/bart-large-mnli""": 1_024, """facebook/bart-large-cnn""": 1_024, """facebook/bart-large-xsum""": 1_024, """yjernite/bart_eli5""": 1_024, } @lru_cache() def __SCREAMING_SNAKE_CASE ( ): _lowercase : Optional[int] = ( list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) ) ) _lowercase : Tuple = bs[:] _lowercase : Optional[Any] = 0 for b in range(2**8 ): if b not in bs: bs.append(__UpperCAmelCase ) cs.append(2**8 + n ) n += 1 _lowercase : List[str] = [chr(__UpperCAmelCase ) for n in cs] return dict(zip(__UpperCAmelCase , __UpperCAmelCase ) ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase : Tuple = set() _lowercase : Optional[int] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _lowercase : int = char return pairs class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ : str = ["input_ids", "attention_mask"] def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_="replace" ,UpperCAmelCase_="<s>" ,UpperCAmelCase_="</s>" ,UpperCAmelCase_="</s>" ,UpperCAmelCase_="<s>" ,UpperCAmelCase_="<unk>" ,UpperCAmelCase_="<pad>" ,UpperCAmelCase_="<mask>" ,UpperCAmelCase_=False ,**UpperCAmelCase_ ,): _lowercase : Union[str, Any] = AddedToken(UpperCAmelCase_ ,lstrip=UpperCAmelCase_ ,rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ) else bos_token _lowercase : List[str] = 
AddedToken(UpperCAmelCase_ ,lstrip=UpperCAmelCase_ ,rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ) else eos_token _lowercase : Optional[Any] = AddedToken(UpperCAmelCase_ ,lstrip=UpperCAmelCase_ ,rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ) else sep_token _lowercase : List[str] = AddedToken(UpperCAmelCase_ ,lstrip=UpperCAmelCase_ ,rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ) else cls_token _lowercase : int = AddedToken(UpperCAmelCase_ ,lstrip=UpperCAmelCase_ ,rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ) else unk_token _lowercase : Union[str, Any] = AddedToken(UpperCAmelCase_ ,lstrip=UpperCAmelCase_ ,rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ) else pad_token # Mask token behave like a normal word, i.e. include the space before it _lowercase : int = AddedToken(UpperCAmelCase_ ,lstrip=UpperCAmelCase_ ,rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ) else mask_token super().__init__( errors=UpperCAmelCase_ ,bos_token=UpperCAmelCase_ ,eos_token=UpperCAmelCase_ ,unk_token=UpperCAmelCase_ ,sep_token=UpperCAmelCase_ ,cls_token=UpperCAmelCase_ ,pad_token=UpperCAmelCase_ ,mask_token=UpperCAmelCase_ ,add_prefix_space=UpperCAmelCase_ ,**UpperCAmelCase_ ,) with open(UpperCAmelCase_ ,encoding="""utf-8""" ) as vocab_handle: _lowercase : Dict = json.load(UpperCAmelCase_ ) _lowercase : int = {v: k for k, v in self.encoder.items()} _lowercase : int = errors # how to handle errors in decoding _lowercase : Any = bytes_to_unicode() _lowercase : Optional[int] = {v: k for k, v in self.byte_encoder.items()} with open(UpperCAmelCase_ ,encoding="""utf-8""" ) as merges_handle: _lowercase : List[Any] = merges_handle.read().split("""\n""" )[1:-1] _lowercase : Any = [tuple(merge.split() ) for merge in bpe_merges] _lowercase : Any = dict(zip(UpperCAmelCase_ ,range(len(UpperCAmelCase_ ) ) ) ) _lowercase : Tuple = {} _lowercase : int = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions _lowercase : List[Any] = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" ) @property def lowerCamelCase__ ( self ): return len(self.encoder ) def lowerCamelCase__ ( self ): return dict(self.encoder ,**self.added_tokens_encoder ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ): if token in self.cache: return self.cache[token] _lowercase : Dict = tuple(UpperCAmelCase_ ) _lowercase : List[Any] = get_pairs(UpperCAmelCase_ ) if not pairs: return token while True: _lowercase : List[str] = min(UpperCAmelCase_ ,key=lambda UpperCAmelCase_ : self.bpe_ranks.get(UpperCAmelCase_ ,float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break _lowercase , _lowercase : int = bigram _lowercase : int = [] _lowercase : Optional[int] = 0 while i < len(UpperCAmelCase_ ): try: _lowercase : List[str] = word.index(UpperCAmelCase_ ,UpperCAmelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) _lowercase : Tuple = j if word[i] == first and i < len(UpperCAmelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 _lowercase : Optional[int] = tuple(UpperCAmelCase_ ) _lowercase : Union[str, Any] = new_word if len(UpperCAmelCase_ ) == 1: break else: _lowercase : Dict = get_pairs(UpperCAmelCase_ ) _lowercase : List[str] = """ """.join(UpperCAmelCase_ ) _lowercase : List[str] = word return word def 
lowerCamelCase__ ( self ,UpperCAmelCase_ ): _lowercase : List[str] = [] for token in re.findall(self.pat ,UpperCAmelCase_ ): _lowercase : Dict = """""".join( self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCAmelCase_ ).split(""" """ ) ) return bpe_tokens def lowerCamelCase__ ( self ,UpperCAmelCase_ ): return self.encoder.get(UpperCAmelCase_ ,self.encoder.get(self.unk_token ) ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ): return self.decoder.get(UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ): _lowercase : Dict = """""".join(UpperCAmelCase_ ) _lowercase : Optional[int] = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors ) return text def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ): if not os.path.isdir(UpperCAmelCase_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return _lowercase : Union[str, Any] = os.path.join( UpperCAmelCase_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) _lowercase : Optional[int] = os.path.join( UpperCAmelCase_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(UpperCAmelCase_ ,"""w""" ,encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=UpperCAmelCase_ ,ensure_ascii=UpperCAmelCase_ ) + """\n""" ) _lowercase : str = 0 with open(UpperCAmelCase_ ,"""w""" ,encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda UpperCAmelCase_ : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" """ Please check that the tokenizer is not corrupted!""" ) _lowercase : Optional[Any] = token_index writer.write(""" """.join(UpperCAmelCase_ ) + """\n""" ) index += 1 return vocab_file, merge_file def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _lowercase : str = [self.cls_token_id] _lowercase : str = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase_ ,token_ids_a=UpperCAmelCase_ ,already_has_special_tokens=UpperCAmelCase_ ) if token_ids_a is None: return [1] + ([0] * len(UpperCAmelCase_ )) + [1] return [1] + ([0] * len(UpperCAmelCase_ )) + [1, 1] + ([0] * len(UpperCAmelCase_ )) + [1] def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ): _lowercase : Optional[int] = [self.sep_token_id] _lowercase : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_=False ,**UpperCAmelCase_ ): _lowercase : Any = kwargs.pop("""add_prefix_space""" ,self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(UpperCAmelCase_ ) > 0 and not text[0].isspace()): _lowercase : Optional[Any] = """ """ + text return (text, kwargs)
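# Standalone check of the byte-to-unicode trick the tokenizer above relies on: every
# byte 0..255 gets a unique printable character, so byte-level BPE never needs an
# <unk> fallback. This mirrors bytes_to_unicode above, with plain variable names.
def bytes_to_unicode_demo():
    bs = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    return dict(zip(bs, [chr(c) for c in cs]))

mapping = bytes_to_unicode_demo()
assert len(mapping) == 256 and len(set(mapping.values())) == 256
print(mapping[ord(" ")])  # 'Ġ' — the visible stand-in for a leading space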
336
"""simple docstring""" UpperCAmelCase: List[str] = """0.21.0""" from .accelerator import Accelerator from .big_modeling import ( cpu_offload, cpu_offload_with_hook, disk_offload, dispatch_model, init_empty_weights, init_on_device, load_checkpoint_and_dispatch, ) from .data_loader import skip_first_batches from .launchers import debug_launcher, notebook_launcher from .state import PartialState from .utils import ( DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, FullyShardedDataParallelPlugin, GradScalerKwargs, InitProcessGroupKwargs, find_executable_batch_size, infer_auto_device_map, is_rich_available, load_checkpoint_in_model, synchronize_rng_states, ) if is_rich_available(): from .utils import rich
336
1
"""simple docstring""" from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, ) @flax.struct.dataclass class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : jnp.ndarray SCREAMING_SNAKE_CASE_ : jnp.ndarray class UpperCamelCase ( nn.Module ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int SCREAMING_SNAKE_CASE_ : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6) SCREAMING_SNAKE_CASE_ : jnp.dtype = jnp.floataa def lowerCamelCase__ ( self ): _lowercase : Optional[int] = nn.Conv( self.block_out_channels[0] ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) _lowercase : Optional[Any] = [] for i in range(len(self.block_out_channels ) - 1 ): _lowercase : Tuple = self.block_out_channels[i] _lowercase : str = self.block_out_channels[i + 1] _lowercase : Optional[int] = nn.Conv( UpperCAmelCase_ ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) blocks.append(UpperCAmelCase_ ) _lowercase : Optional[Any] = nn.Conv( UpperCAmelCase_ ,kernel_size=(3, 3) ,strides=(2, 2) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) blocks.append(UpperCAmelCase_ ) _lowercase : Dict = blocks _lowercase : Any = nn.Conv( self.conditioning_embedding_channels ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,) def __call__( self ,UpperCAmelCase_ ): _lowercase : int = self.conv_in(UpperCAmelCase_ ) _lowercase : Optional[int] = nn.silu(UpperCAmelCase_ ) for block in self.blocks: _lowercase : Dict = block(UpperCAmelCase_ ) _lowercase : str = nn.silu(UpperCAmelCase_ ) _lowercase : Tuple = self.conv_out(UpperCAmelCase_ ) return embedding @flax_register_to_config class UpperCamelCase ( nn.Module , snake_case , snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = 3_2 SCREAMING_SNAKE_CASE_ : int = 4 SCREAMING_SNAKE_CASE_ : Tuple[str] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) SCREAMING_SNAKE_CASE_ : Union[bool, Tuple[bool]] = False SCREAMING_SNAKE_CASE_ : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0) SCREAMING_SNAKE_CASE_ : int = 2 SCREAMING_SNAKE_CASE_ : Union[int, Tuple[int]] = 8 SCREAMING_SNAKE_CASE_ : Optional[Union[int, Tuple[int]]] = None SCREAMING_SNAKE_CASE_ : int = 1_2_8_0 SCREAMING_SNAKE_CASE_ : float = 0.0 SCREAMING_SNAKE_CASE_ : bool = False SCREAMING_SNAKE_CASE_ : jnp.dtype = jnp.floataa SCREAMING_SNAKE_CASE_ : bool = True SCREAMING_SNAKE_CASE_ : int = 0 SCREAMING_SNAKE_CASE_ : str = "rgb" SCREAMING_SNAKE_CASE_ : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6) def lowerCamelCase__ ( self ,UpperCAmelCase_ ): # init input tensors _lowercase : Dict = (1, self.in_channels, self.sample_size, self.sample_size) _lowercase : Dict = jnp.zeros(UpperCAmelCase_ ,dtype=jnp.floataa ) _lowercase : Union[str, Any] = jnp.ones((1,) ,dtype=jnp.intaa ) _lowercase : int = jnp.zeros((1, 1, self.cross_attention_dim) ,dtype=jnp.floataa ) _lowercase : Tuple = (1, 3, self.sample_size * 8, self.sample_size * 8) _lowercase : List[str] = jnp.zeros(UpperCAmelCase_ ,dtype=jnp.floataa ) _lowercase , _lowercase : Union[str, Any] = 
jax.random.split(UpperCAmelCase_ ) _lowercase : List[Any] = {"""params""": params_rng, """dropout""": dropout_rng} return self.init(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ )["params"] def lowerCamelCase__ ( self ): _lowercase : Optional[Any] = self.block_out_channels _lowercase : List[str] = block_out_channels[0] * 4 # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. _lowercase : Any = self.num_attention_heads or self.attention_head_dim # input _lowercase : Optional[int] = nn.Conv( block_out_channels[0] ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) # time _lowercase : Optional[int] = FlaxTimesteps( block_out_channels[0] ,flip_sin_to_cos=self.flip_sin_to_cos ,freq_shift=self.config.freq_shift ) _lowercase : Optional[int] = FlaxTimestepEmbedding(UpperCAmelCase_ ,dtype=self.dtype ) _lowercase : List[Any] = FlaxControlNetConditioningEmbedding( conditioning_embedding_channels=block_out_channels[0] ,block_out_channels=self.conditioning_embedding_out_channels ,) _lowercase : Any = self.only_cross_attention if isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ): _lowercase : int = (only_cross_attention,) * len(self.down_block_types ) if isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ): _lowercase : Dict = (num_attention_heads,) * len(self.down_block_types ) # down _lowercase : List[Any] = [] _lowercase : int = [] _lowercase : Optional[Any] = block_out_channels[0] _lowercase : Tuple = nn.Conv( UpperCAmelCase_ ,kernel_size=(1, 1) ,padding="""VALID""" ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,) controlnet_down_blocks.append(UpperCAmelCase_ ) for i, down_block_type in enumerate(self.down_block_types ): _lowercase : List[str] = output_channel _lowercase : Optional[Any] = block_out_channels[i] _lowercase : Dict = i == len(UpperCAmelCase_ ) - 1 if down_block_type == "CrossAttnDownBlock2D": _lowercase : List[Any] = FlaxCrossAttnDownBlockaD( in_channels=UpperCAmelCase_ ,out_channels=UpperCAmelCase_ ,dropout=self.dropout ,num_layers=self.layers_per_block ,num_attention_heads=num_attention_heads[i] ,add_downsample=not is_final_block ,use_linear_projection=self.use_linear_projection ,only_cross_attention=only_cross_attention[i] ,dtype=self.dtype ,) else: _lowercase : int = FlaxDownBlockaD( in_channels=UpperCAmelCase_ ,out_channels=UpperCAmelCase_ ,dropout=self.dropout ,num_layers=self.layers_per_block ,add_downsample=not is_final_block ,dtype=self.dtype ,) down_blocks.append(UpperCAmelCase_ ) for _ in range(self.layers_per_block ): _lowercase : str = nn.Conv( UpperCAmelCase_ ,kernel_size=(1, 1) ,padding="""VALID""" ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,) controlnet_down_blocks.append(UpperCAmelCase_ ) if not is_final_block: _lowercase : Any = nn.Conv( UpperCAmelCase_ ,kernel_size=(1, 1) ,padding="""VALID""" ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() 
,dtype=self.dtype ,) controlnet_down_blocks.append(UpperCAmelCase_ ) _lowercase : Optional[int] = down_blocks _lowercase : List[str] = controlnet_down_blocks # mid _lowercase : List[Any] = block_out_channels[-1] _lowercase : str = FlaxUNetMidBlockaDCrossAttn( in_channels=UpperCAmelCase_ ,dropout=self.dropout ,num_attention_heads=num_attention_heads[-1] ,use_linear_projection=self.use_linear_projection ,dtype=self.dtype ,) _lowercase : List[Any] = nn.Conv( UpperCAmelCase_ ,kernel_size=(1, 1) ,padding="""VALID""" ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,) def __call__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = 1.0 ,UpperCAmelCase_ = True ,UpperCAmelCase_ = False ,): _lowercase : Optional[int] = self.controlnet_conditioning_channel_order if channel_order == "bgr": _lowercase : Dict = jnp.flip(UpperCAmelCase_ ,axis=1 ) # 1. time if not isinstance(UpperCAmelCase_ ,jnp.ndarray ): _lowercase : Any = jnp.array([timesteps] ,dtype=jnp.intaa ) elif isinstance(UpperCAmelCase_ ,jnp.ndarray ) and len(timesteps.shape ) == 0: _lowercase : Tuple = timesteps.astype(dtype=jnp.floataa ) _lowercase : Tuple = jnp.expand_dims(UpperCAmelCase_ ,0 ) _lowercase : str = self.time_proj(UpperCAmelCase_ ) _lowercase : str = self.time_embedding(UpperCAmelCase_ ) # 2. pre-process _lowercase : Dict = jnp.transpose(UpperCAmelCase_ ,(0, 2, 3, 1) ) _lowercase : Optional[int] = self.conv_in(UpperCAmelCase_ ) _lowercase : Dict = jnp.transpose(UpperCAmelCase_ ,(0, 2, 3, 1) ) _lowercase : Any = self.controlnet_cond_embedding(UpperCAmelCase_ ) sample += controlnet_cond # 3. down _lowercase : int = (sample,) for down_block in self.down_blocks: if isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ): _lowercase , _lowercase : List[str] = down_block(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,deterministic=not train ) else: _lowercase , _lowercase : Any = down_block(UpperCAmelCase_ ,UpperCAmelCase_ ,deterministic=not train ) down_block_res_samples += res_samples # 4. mid _lowercase : Tuple = self.mid_block(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,deterministic=not train ) # 5. contronet blocks _lowercase : Optional[Any] = () for down_block_res_sample, controlnet_block in zip(UpperCAmelCase_ ,self.controlnet_down_blocks ): _lowercase : Dict = controlnet_block(UpperCAmelCase_ ) controlnet_down_block_res_samples += (down_block_res_sample,) _lowercase : Any = controlnet_down_block_res_samples _lowercase : Optional[int] = self.controlnet_mid_block(UpperCAmelCase_ ) # 6. scaling _lowercase : str = [sample * conditioning_scale for sample in down_block_res_samples] mid_block_res_sample *= conditioning_scale if not return_dict: return (down_block_res_samples, mid_block_res_sample) return FlaxControlNetOutput( down_block_res_samples=UpperCAmelCase_ ,mid_block_res_sample=UpperCAmelCase_ )
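# Standalone Flax sketch (not the diffusers class itself) of the conditioning
# embedder defined above: stride-2 conv stages map an RGB control image at 8x the
# latent resolution down to the UNet's first feature map, with a zero-initialized
# output conv so the ControlNet starts as a no-op. Channel widths follow the
# (16, 32, 96, 256) default above; the class and variable names are illustrative.
import jax
import jax.numpy as jnp
import flax.linen as nn

class CondEmbedSketch(nn.Module):
    out_channels: int = 320
    widths: tuple = (16, 32, 96, 256)

    @nn.compact
    def __call__(self, x):  # x: (N, H, W, 3) with H = W = 8 * latent size
        x = nn.silu(nn.Conv(self.widths[0], (3, 3), padding=((1, 1), (1, 1)))(x))
        for w_in, w_out in zip(self.widths[:-1], self.widths[1:]):
            x = nn.silu(nn.Conv(w_in, (3, 3), padding=((1, 1), (1, 1)))(x))
            x = nn.silu(nn.Conv(w_out, (3, 3), strides=(2, 2), padding=((1, 1), (1, 1)))(x))
        return nn.Conv(self.out_channels, (3, 3), padding=((1, 1), (1, 1)),
                       kernel_init=nn.initializers.zeros, bias_init=nn.initializers.zeros)(x)

cond = jnp.zeros((1, 256, 256, 3))
params = CondEmbedSketch().init(jax.random.PRNGKey(0), cond)
print(CondEmbedSketch().apply(params, cond).shape)  # (1, 32, 32, 320): three stride-2 stages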
336
"""simple docstring""" UpperCAmelCase: str = """ # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git """ UpperCAmelCase: Any = [{"""type""": """code""", """content""": INSTALL_CONTENT}] UpperCAmelCase: int = { """{processor_class}""": """FakeProcessorClass""", """{model_class}""": """FakeModelClass""", """{object_class}""": """FakeObjectClass""", }
336
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase: Union[str, Any] = logging.get_logger(__name__) UpperCAmelCase: Union[str, Any] = { """sayakpaul/vit-msn-base""": """https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json""", # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn } class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = "vit_msn" def __init__( self ,UpperCAmelCase_=7_68 ,UpperCAmelCase_=12 ,UpperCAmelCase_=12 ,UpperCAmelCase_=30_72 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=1E-06 ,UpperCAmelCase_=2_24 ,UpperCAmelCase_=16 ,UpperCAmelCase_=3 ,UpperCAmelCase_=True ,**UpperCAmelCase_ ,): super().__init__(**UpperCAmelCase_ ) _lowercase : Union[str, Any] = hidden_size _lowercase : Dict = num_hidden_layers _lowercase : int = num_attention_heads _lowercase : str = intermediate_size _lowercase : Tuple = hidden_act _lowercase : List[Any] = hidden_dropout_prob _lowercase : str = attention_probs_dropout_prob _lowercase : List[str] = initializer_range _lowercase : List[Any] = layer_norm_eps _lowercase : List[Any] = image_size _lowercase : str = patch_size _lowercase : List[Any] = num_channels _lowercase : Any = qkv_bias
336
"""simple docstring""" import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL UpperCAmelCase: List[Any] = logging.get_logger(__name__) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): def constraint_to_multiple_of(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=0 , __UpperCAmelCase=None ): _lowercase : Union[str, Any] = round(val / multiple ) * multiple if max_val is not None and x > max_val: _lowercase : str = math.floor(val / multiple ) * multiple if x < min_val: _lowercase : Dict = math.ceil(val / multiple ) * multiple return x _lowercase : List[str] = (output_size, output_size) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else output_size _lowercase , _lowercase : List[Any] = get_image_size(__UpperCAmelCase ) _lowercase , _lowercase : Union[str, Any] = output_size # determine new height and width _lowercase : str = output_height / input_height _lowercase : List[Any] = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width _lowercase : str = scale_width else: # fit height _lowercase : int = scale_height _lowercase : List[Any] = constraint_to_multiple_of(scale_height * input_height , multiple=__UpperCAmelCase ) _lowercase : Optional[Any] = constraint_to_multiple_of(scale_width * input_width , multiple=__UpperCAmelCase ) return (new_height, new_width) class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = ["pixel_values"] def __init__( self ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,UpperCAmelCase_ = PILImageResampling.BILINEAR ,UpperCAmelCase_ = False ,UpperCAmelCase_ = 1 ,UpperCAmelCase_ = True ,UpperCAmelCase_ = 1 / 2_55 ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,): super().__init__(**UpperCAmelCase_ ) _lowercase : List[Any] = size if size is not None else {"""height""": 3_84, """width""": 3_84} _lowercase : str = get_size_dict(UpperCAmelCase_ ) _lowercase : Tuple = do_resize _lowercase : Any = size _lowercase : List[Any] = keep_aspect_ratio _lowercase : Any = ensure_multiple_of _lowercase : str = resample _lowercase : Optional[Any] = do_rescale _lowercase : List[Any] = rescale_factor _lowercase : Union[str, Any] = do_normalize _lowercase : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _lowercase : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = False ,UpperCAmelCase_ = 1 ,UpperCAmelCase_ = PILImageResampling.BICUBIC ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,): _lowercase : Optional[Any] = get_size_dict(UpperCAmelCase_ ) if "height" not in size or "width" not in size: raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. 
Got {size.keys()}""" ) _lowercase : Dict = get_resize_output_image_size( UpperCAmelCase_ ,output_size=(size["""height"""], size["""width"""]) ,keep_aspect_ratio=UpperCAmelCase_ ,multiple=UpperCAmelCase_ ,) return resize(UpperCAmelCase_ ,size=UpperCAmelCase_ ,resample=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,): return rescale(UpperCAmelCase_ ,scale=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,): return normalize(UpperCAmelCase_ ,mean=UpperCAmelCase_ ,std=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = ChannelDimension.FIRST ,**UpperCAmelCase_ ,): _lowercase : Any = do_resize if do_resize is not None else self.do_resize _lowercase : List[str] = size if size is not None else self.size _lowercase : int = get_size_dict(UpperCAmelCase_ ) _lowercase : Any = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio _lowercase : List[str] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of _lowercase : List[str] = resample if resample is not None else self.resample _lowercase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale _lowercase : Any = rescale_factor if rescale_factor is not None else self.rescale_factor _lowercase : str = do_normalize if do_normalize is not None else self.do_normalize _lowercase : Optional[int] = image_mean if image_mean is not None else self.image_mean _lowercase : int = image_std if image_std is not None else self.image_std _lowercase : Union[str, Any] = make_list_of_images(UpperCAmelCase_ ) if not valid_images(UpperCAmelCase_ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. 
_lowercase : int = [to_numpy_array(UpperCAmelCase_ ) for image in images] if do_resize: _lowercase : Union[str, Any] = [self.resize(image=UpperCAmelCase_ ,size=UpperCAmelCase_ ,resample=UpperCAmelCase_ ) for image in images] if do_rescale: _lowercase : int = [self.rescale(image=UpperCAmelCase_ ,scale=UpperCAmelCase_ ) for image in images] if do_normalize: _lowercase : str = [self.normalize(image=UpperCAmelCase_ ,mean=UpperCAmelCase_ ,std=UpperCAmelCase_ ) for image in images] _lowercase : Tuple = [to_channel_dimension_format(UpperCAmelCase_ ,UpperCAmelCase_ ) for image in images] _lowercase : int = {"""pixel_values""": images} return BatchFeature(data=UpperCAmelCase_ ,tensor_type=UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ): _lowercase : Union[str, Any] = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(UpperCAmelCase_ ) != len(UpperCAmelCase_ ): raise ValueError( """Make sure that you pass in as many target sizes as the batch dimension of the logits""" ) if is_torch_tensor(UpperCAmelCase_ ): _lowercase : Tuple = target_sizes.numpy() _lowercase : Optional[Any] = [] for idx in range(len(UpperCAmelCase_ ) ): _lowercase : Dict = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode="""bilinear""" ,align_corners=UpperCAmelCase_ ) _lowercase : Optional[int] = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(UpperCAmelCase_ ) else: _lowercase : Union[str, Any] = logits.argmax(dim=1 ) _lowercase : Optional[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
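# Worked example of the resize rule implemented above: with keep_aspect_ratio set,
# pick the scale closest to 1, then snap each side to a multiple of
# ensure_multiple_of. (Standalone re-implementation of constraint_to_multiple_of.)
import math

def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
    x = round(val / multiple) * multiple
    if max_val is not None and x > max_val:
        x = math.floor(val / multiple) * multiple
    if x < min_val:
        x = math.ceil(val / multiple) * multiple
    return x

input_h, input_w, output_h, output_w = 480, 640, 384, 384
scale_h, scale_w = output_h / input_h, output_w / input_w  # 0.8, 0.6
if abs(1 - scale_w) < abs(1 - scale_h):  # keep aspect ratio: scale as little as possible
    scale_h = scale_w
else:
    scale_w = scale_h
print(constraint_to_multiple_of(scale_h * input_h, multiple=32),
      constraint_to_multiple_of(scale_w * input_w, multiple=32))  # 384 512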
336
1
"""simple docstring""" def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase = 4000000 ): _lowercase : int = [0, 1] _lowercase : Optional[Any] = 0 while fib[i] <= n: fib.append(fib[i] + fib[i + 1] ) if fib[i + 2] > n: break i += 1 _lowercase : Any = 0 for j in range(len(__UpperCAmelCase ) - 1 ): if fib[j] % 2 == 0: total += fib[j] return total if __name__ == "__main__": print(F'{solution() = }')
336
"""simple docstring""" import numpy as np import skfuzzy as fuzz if __name__ == "__main__": # Create universe of discourse in Python using linspace () UpperCAmelCase: Optional[Any] = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False) # Create two fuzzy sets by defining any membership function # (trapmf(), gbellmf(), gaussmf(), etc). UpperCAmelCase: Tuple = [0, 25, 50] UpperCAmelCase: List[Any] = [25, 50, 75] UpperCAmelCase: Optional[int] = fuzz.membership.trimf(X, abca) UpperCAmelCase: Any = fuzz.membership.trimf(X, abca) # Compute the different operations using inbuilt functions. UpperCAmelCase: List[Any] = np.ones(75) UpperCAmelCase: Any = np.zeros((75,)) # 1. Union = max(µA(x), µB(x)) UpperCAmelCase: str = fuzz.fuzzy_or(X, young, X, middle_aged)[1] # 2. Intersection = min(µA(x), µB(x)) UpperCAmelCase: Optional[int] = fuzz.fuzzy_and(X, young, X, middle_aged)[1] # 3. Complement (A) = (1- min(µA(x)) UpperCAmelCase: List[Any] = fuzz.fuzzy_not(young) # 4. Difference (A/B) = min(µA(x),(1- µB(x))) UpperCAmelCase: Optional[int] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1] # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))] UpperCAmelCase: int = young + middle_aged - (young * middle_aged) # 6. Algebraic Product = (µA(x) * µB(x)) UpperCAmelCase: int = young * middle_aged # 7. Bounded Sum = min[1,(µA(x), µB(x))] UpperCAmelCase: List[Any] = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1] # 8. Bounded difference = min[0,(µA(x), µB(x))] UpperCAmelCase: int = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1] # max-min composition # max-product composition # Plot each set A, set B and each operation result using plot() and subplot(). from matplotlib import pyplot as plt plt.figure() plt.subplot(4, 3, 1) plt.plot(X, young) plt.title("""Young""") plt.grid(True) plt.subplot(4, 3, 2) plt.plot(X, middle_aged) plt.title("""Middle aged""") plt.grid(True) plt.subplot(4, 3, 3) plt.plot(X, union) plt.title("""union""") plt.grid(True) plt.subplot(4, 3, 4) plt.plot(X, intersection) plt.title("""intersection""") plt.grid(True) plt.subplot(4, 3, 5) plt.plot(X, complement_a) plt.title("""complement_a""") plt.grid(True) plt.subplot(4, 3, 6) plt.plot(X, difference) plt.title("""difference a/b""") plt.grid(True) plt.subplot(4, 3, 7) plt.plot(X, alg_sum) plt.title("""alg_sum""") plt.grid(True) plt.subplot(4, 3, 8) plt.plot(X, alg_product) plt.title("""alg_product""") plt.grid(True) plt.subplot(4, 3, 9) plt.plot(X, bdd_sum) plt.title("""bdd_sum""") plt.grid(True) plt.subplot(4, 3, 10) plt.plot(X, bdd_difference) plt.title("""bdd_difference""") plt.grid(True) plt.subplots_adjust(hspace=0.5) plt.show()
336
1
"""simple docstring""" import warnings from ...utils import logging from .image_processing_deit import DeiTImageProcessor UpperCAmelCase: Tuple = logging.get_logger(__name__) class UpperCamelCase ( snake_case ): """simple docstring""" def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): warnings.warn( """The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use DeiTImageProcessor instead.""" ,UpperCAmelCase_ ,) super().__init__(*UpperCAmelCase_ ,**UpperCAmelCase_ )
336
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class UpperCamelCase ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self ): _lowercase : str = tempfile.mkdtemp() # fmt: off _lowercase : List[Any] = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""] # fmt: on _lowercase : Optional[int] = dict(zip(UpperCAmelCase_ ,range(len(UpperCAmelCase_ ) ) ) ) _lowercase : Optional[int] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""] _lowercase : Optional[int] = {"""unk_token""": """<unk>"""} _lowercase : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] ) _lowercase : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write(json.dumps(UpperCAmelCase_ ) + """\n""" ) with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write("""\n""".join(UpperCAmelCase_ ) ) _lowercase : Dict = { """do_resize""": True, """size""": 20, """do_center_crop""": True, """crop_size""": 18, """do_normalize""": True, """image_mean""": [0.48145466, 0.4578275, 0.40821073], """image_std""": [0.26862954, 0.26130258, 0.27577711], } _lowercase : List[Any] = os.path.join(self.tmpdirname ,UpperCAmelCase_ ) with open(self.image_processor_file ,"""w""" ,encoding="""utf-8""" ) as fp: json.dump(UpperCAmelCase_ ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ,**UpperCAmelCase_ ): return CLIPTokenizer.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,**UpperCAmelCase_ ): return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,**UpperCAmelCase_ ): return CLIPImageProcessor.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ): shutil.rmtree(self.tmpdirname ) def lowerCamelCase__ ( self ): _lowercase : Optional[Any] = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )] _lowercase : Tuple = [Image.fromarray(np.moveaxis(UpperCAmelCase_ ,0 ,-1 ) ) for x in image_inputs] return image_inputs def lowerCamelCase__ ( self ): _lowercase : Union[str, Any] = self.get_tokenizer() _lowercase : List[Any] = self.get_rust_tokenizer() _lowercase : List[Any] = self.get_image_processor() _lowercase : List[str] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) processor_slow.save_pretrained(self.tmpdirname ) _lowercase : Optional[int] = CLIPProcessor.from_pretrained(self.tmpdirname ,use_fast=UpperCAmelCase_ ) _lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) processor_fast.save_pretrained(self.tmpdirname ) _lowercase : List[str] = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() ) 
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer ,UpperCAmelCase_ ) self.assertIsInstance(processor_fast.tokenizer ,UpperCAmelCase_ ) self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor ,UpperCAmelCase_ ) self.assertIsInstance(processor_fast.image_processor ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ): _lowercase : str = CLIPProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _lowercase : Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" ) _lowercase : Optional[int] = self.get_image_processor(do_normalize=UpperCAmelCase_ ,padding_value=1.0 ) _lowercase : int = CLIPProcessor.from_pretrained( self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=UpperCAmelCase_ ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,UpperCAmelCase_ ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ): _lowercase : Optional[Any] = self.get_image_processor() _lowercase : Optional[int] = self.get_tokenizer() _lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) _lowercase : int = self.prepare_image_inputs() _lowercase : str = image_processor(UpperCAmelCase_ ,return_tensors="""np""" ) _lowercase : int = processor(images=UpperCAmelCase_ ,return_tensors="""np""" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() ,input_processor[key].sum() ,delta=1E-2 ) def lowerCamelCase__ ( self ): _lowercase : Optional[Any] = self.get_image_processor() _lowercase : Optional[Any] = self.get_tokenizer() _lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) _lowercase : List[Any] = """lower newer""" _lowercase : Any = processor(text=UpperCAmelCase_ ) _lowercase : Union[str, Any] = tokenizer(UpperCAmelCase_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def lowerCamelCase__ ( self ): _lowercase : Union[str, Any] = self.get_image_processor() _lowercase : List[Any] = self.get_tokenizer() _lowercase : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) _lowercase : str = """lower newer""" _lowercase : List[Any] = self.prepare_image_inputs() _lowercase : Optional[int] = processor(text=UpperCAmelCase_ ,images=UpperCAmelCase_ ) self.assertListEqual(list(inputs.keys() ) ,["""input_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(UpperCAmelCase_ ): processor() def lowerCamelCase__ ( self ): _lowercase : Dict = self.get_image_processor() _lowercase : List[Any] = self.get_tokenizer() _lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) _lowercase : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _lowercase : int = processor.batch_decode(UpperCAmelCase_ ) _lowercase : 
Tuple = tokenizer.batch_decode(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ): _lowercase : Optional[Any] = self.get_image_processor() _lowercase : List[Any] = self.get_tokenizer() _lowercase : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) _lowercase : Optional[Any] = """lower newer""" _lowercase : Any = self.prepare_image_inputs() _lowercase : Optional[int] = processor(text=UpperCAmelCase_ ,images=UpperCAmelCase_ ) self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
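# Hedged end-to-end sketch of what the tests above exercise: a single CLIPProcessor
# call returns token ids, attention mask and pixel values together. The checkpoint
# name is the canonical public one, not taken from this file.
import numpy as np
from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(text=["lower newer"], images=image, return_tensors="np")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']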
336
1
"""simple docstring""" def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): if exponent == 1: return base if exponent % 2 == 0: _lowercase : int = _modexpt(__UpperCAmelCase , exponent // 2 , __UpperCAmelCase ) % modulo_value return (x * x) % modulo_value else: return (base * _modexpt(__UpperCAmelCase , exponent - 1 , __UpperCAmelCase )) % modulo_value def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase = 1777 , __UpperCAmelCase = 1855 , __UpperCAmelCase = 8 ): _lowercase : Any = base for _ in range(1 , __UpperCAmelCase ): _lowercase : Optional[Any] = _modexpt(__UpperCAmelCase , __UpperCAmelCase , 10**digits ) return result if __name__ == "__main__": print(F'{solution() = }')
336
"""simple docstring""" import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int UpperCAmelCase: str = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class UpperCamelCase ( datasets.BuilderConfig ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[datasets.Features] = None def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , ): import pyspark def generate_fn(): _lowercase : List[Any] = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) ) for partition_id in partition_order: _lowercase : Optional[int] = df_with_partition_id.select("""*""" ).where(F"""part_id = {partition_id}""" ).drop("""part_id""" ) _lowercase : int = partition_df.collect() _lowercase : Dict = 0 for row in rows: yield F"""{partition_id}_{row_id}""", row.asDict() row_id += 1 return generate_fn class UpperCamelCase ( _BaseExamplesIterable ): """simple docstring""" def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_=None ,): _lowercase : Union[str, Any] = df _lowercase : List[str] = partition_order or range(self.df.rdd.getNumPartitions() ) _lowercase : Tuple = _generate_iterable_examples(self.df ,self.partition_order ) def __iter__( self ): yield from self.generate_examples_fn() def lowerCamelCase__ ( self ,UpperCAmelCase_ ): _lowercase : List[str] = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(UpperCAmelCase_ ) return SparkExamplesIterable(self.df ,partition_order=UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ): _lowercase : Union[str, Any] = self.split_shard_indices_by_worker(UpperCAmelCase_ ,UpperCAmelCase_ ) return SparkExamplesIterable(self.df ,partition_order=UpperCAmelCase_ ) @property def lowerCamelCase__ ( self ): return len(self.partition_order ) class UpperCamelCase ( datasets.DatasetBuilder ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = SparkConfig def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,): import pyspark _lowercase : List[Any] = pyspark.sql.SparkSession.builder.getOrCreate() _lowercase : List[Any] = df _lowercase : int = working_dir super().__init__( cache_dir=UpperCAmelCase_ ,config_name=str(self.df.semanticHash() ) ,**UpperCAmelCase_ ,) def lowerCamelCase__ ( self ): # Returns the path of the created file. def create_cache_and_write_probe(UpperCAmelCase_ ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(self._cache_dir ,exist_ok=UpperCAmelCase_ ) _lowercase : Union[str, Any] = os.path.join(self._cache_dir ,"""fs_test""" + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(UpperCAmelCase_ ,"""a""" ) return [probe_file] if self._spark.conf.get("""spark.master""" ,"""""" ).startswith("""local""" ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. 
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. if self._cache_dir: _lowercase : List[str] = ( self._spark.sparkContext.parallelize(range(1 ) ,1 ).mapPartitions(UpperCAmelCase_ ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( """When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" ) def lowerCamelCase__ ( self ): return datasets.DatasetInfo(features=self.config.features ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ): return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def lowerCamelCase__ ( self ,UpperCAmelCase_ ): import pyspark def get_arrow_batch_size(UpperCAmelCase_ ): for batch in it: yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} ) _lowercase : List[str] = self.df.count() _lowercase : List[str] = df_num_rows if df_num_rows <= 1_00 else 1_00 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. _lowercase : Union[str, Any] = ( self.df.limit(UpperCAmelCase_ ) .repartition(1 ) .mapInArrow(UpperCAmelCase_ ,"""batch_bytes: long""" ) .agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) ) .collect()[0] .sample_bytes / sample_num_rows ) _lowercase : List[Any] = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. _lowercase : int = min(UpperCAmelCase_ ,int(approx_total_size / max_shard_size ) ) _lowercase : List[Any] = self.df.repartition(UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,): import pyspark _lowercase : Union[str, Any] = ParquetWriter if file_format == """parquet""" else ArrowWriter _lowercase : List[Any] = os.path.join(self._working_dir ,os.path.basename(UpperCAmelCase_ ) ) if self._working_dir else fpath _lowercase : Any = file_format == """parquet""" # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. _lowercase : Union[str, Any] = self.config.features _lowercase : Optional[int] = self._writer_batch_size _lowercase : Optional[Any] = self._fs.storage_options def write_arrow(UpperCAmelCase_ ): # Within the same SparkContext, no two task attempts will share the same attempt ID. _lowercase : Any = pyspark.TaskContext().taskAttemptId() _lowercase : List[str] = next(UpperCAmelCase_ ,UpperCAmelCase_ ) if first_batch is None: # Some partitions might not receive any data. 
return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,) _lowercase : List[Any] = 0 _lowercase : int = writer_class( features=UpperCAmelCase_ ,path=working_fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,writer_batch_size=UpperCAmelCase_ ,storage_options=UpperCAmelCase_ ,embed_local_files=UpperCAmelCase_ ,) _lowercase : Optional[int] = pa.Table.from_batches([first_batch] ) writer.write_table(UpperCAmelCase_ ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: _lowercase , _lowercase : Optional[Any] = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,) shard_id += 1 _lowercase : Union[str, Any] = writer_class( features=writer._features ,path=working_fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,writer_batch_size=UpperCAmelCase_ ,storage_options=UpperCAmelCase_ ,embed_local_files=UpperCAmelCase_ ,) _lowercase : Dict = pa.Table.from_batches([batch] ) writer.write_table(UpperCAmelCase_ ) if writer._num_bytes > 0: _lowercase , _lowercase : Dict = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,) if working_fpath != fpath: for file in os.listdir(os.path.dirname(UpperCAmelCase_ ) ): _lowercase : Dict = os.path.join(os.path.dirname(UpperCAmelCase_ ) ,os.path.basename(UpperCAmelCase_ ) ) shutil.move(UpperCAmelCase_ ,UpperCAmelCase_ ) _lowercase : List[str] = ( self.df.mapInArrow(UpperCAmelCase_ ,"""task_id: long, num_examples: long, num_bytes: long""" ) .groupBy("""task_id""" ) .agg( pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) ,pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) ,pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) ,pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) ,) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = "arrow" ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,): self._validate_cache_dir() _lowercase : Tuple = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(UpperCAmelCase_ ) _lowercase : Optional[int] = not is_remote_filesystem(self._fs ) _lowercase : Dict = os.path.join if is_local else posixpath.join _lowercase : int = """-TTTTT-SSSSS-of-NNNNN""" _lowercase : Optional[Any] = f"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}""" _lowercase : Dict = path_join(self._output_dir ,UpperCAmelCase_ ) _lowercase : List[Any] = 0 _lowercase : Optional[Any] = 0 _lowercase : int = 0 _lowercase : Any = [] _lowercase : Any = [] for task_id, content in self._prepare_split_single(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ): ( ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ) : Tuple = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(UpperCAmelCase_ ) _lowercase : Optional[int] = total_num_examples _lowercase : List[Any] = total_num_bytes # should rename 
everything at the end logger.debug(f"""Renaming {total_shards} shards.""" ) if total_shards > 1: _lowercase : List[Any] = all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the SparkContext. _lowercase : Union[str, Any] = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,): rename( UpperCAmelCase_ ,fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,fpath.replace("""TTTTT-SSSSS""" ,f"""{global_shard_id:05d}""" ).replace("""NNNNN""" ,f"""{total_shards:05d}""" ) ,) _lowercase : Optional[Any] = [] _lowercase : List[str] = 0 for i in range(len(UpperCAmelCase_ ) ): _lowercase , _lowercase : List[str] = task_id_and_num_shards[i] for shard_id in range(UpperCAmelCase_ ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(UpperCAmelCase_ ,len(UpperCAmelCase_ ) ).map(lambda UpperCAmelCase_ : _rename_shard(*UpperCAmelCase_ ) ).collect() else: # don't use any pattern _lowercase : Tuple = 0 _lowercase : Optional[Any] = task_id_and_num_shards[0][0] self._rename( fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,fpath.replace(UpperCAmelCase_ ,"""""" ) ,) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,): return SparkExamplesIterable(self.df )
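The style-context field above is the Spark-backed `DatasetBuilder` from 🤗 `datasets` (`SparkExamplesIterable` plus shard-per-partition Arrow writing and the `-TTTTT-SSSSS-of-NNNNN` rename pass). A hedged usage sketch of its public entry point, assuming a local Spark session and a `datasets` version that ships `Dataset.from_spark` (2.11+):

```python
from pyspark.sql import SparkSession
from datasets import Dataset

spark = SparkSession.builder.master("local[*]").getOrCreate()
df = spark.createDataFrame([("hello",), ("world",)], schema="text string")

# from_spark drives the builder above: one Arrow shard per Spark partition,
# renamed afterwards into the -SSSSS-of-NNNNN pattern.
ds = Dataset.from_spark(df)
print(ds[0])
```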
336
1
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase: Optional[int] = { """configuration_informer""": [ """INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """InformerConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase: List[Any] = [ """INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """InformerForPrediction""", """InformerModel""", """InformerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_informer import ( INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, InformerForPrediction, InformerModel, InformerPreTrainedModel, ) else: import sys UpperCAmelCase: Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
336
"""simple docstring""" import unittest from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin UpperCAmelCase: Optional[int] = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class UpperCamelCase ( snake_case , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = XLNetTokenizer SCREAMING_SNAKE_CASE_ : Dict = XLNetTokenizerFast SCREAMING_SNAKE_CASE_ : int = True SCREAMING_SNAKE_CASE_ : Union[str, Any] = True def lowerCamelCase__ ( self ): super().setUp() # We have a SentencePiece fixture for testing _lowercase : Dict = XLNetTokenizer(UpperCAmelCase_ ,keep_accents=UpperCAmelCase_ ) tokenizer.sanitize_special_tokens() tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase__ ( self ): _lowercase : Union[str, Any] = """<s>""" _lowercase : List[Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) ,UpperCAmelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ): _lowercase : str = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,"""<unk>""" ) self.assertEqual(vocab_keys[1] ,"""<s>""" ) self.assertEqual(vocab_keys[-1] ,"""<eod>""" ) self.assertEqual(len(UpperCAmelCase_ ) ,10_06 ) def lowerCamelCase__ ( self ): self.assertEqual(self.get_tokenizer().vocab_size ,10_00 ) def lowerCamelCase__ ( self ): _lowercase : int = XLNetTokenizer(UpperCAmelCase_ ,keep_accents=UpperCAmelCase_ ) _lowercase : Union[str, Any] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(UpperCAmelCase_ ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) ,[2_85, 46, 10, 1_70, 3_82] ) _lowercase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( UpperCAmelCase_ ,[ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] ,) _lowercase : List[Any] = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ ,[8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] ) _lowercase : Optional[Any] = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ ) self.assertListEqual( UpperCAmelCase_ ,[ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] ,) def lowerCamelCase__ ( self ): _lowercase : Dict = XLNetTokenizer(UpperCAmelCase_ ,do_lower_case=UpperCAmelCase_ ) _lowercase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( UpperCAmelCase_ ,[ SPIECE_UNDERLINE + """""", """i""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + 
"""in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """se""", """.""", ] ,) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""▁he""", """ll""", """o"""] ) def lowerCamelCase__ ( self ): _lowercase : int = XLNetTokenizer(UpperCAmelCase_ ,do_lower_case=UpperCAmelCase_ ) _lowercase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( UpperCAmelCase_ ,[ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """se""", """.""", ] ,) @slow def lowerCamelCase__ ( self ): _lowercase : Dict = XLNetTokenizer.from_pretrained("""xlnet-base-cased""" ) _lowercase : int = tokenizer.encode("""sequence builders""" ,add_special_tokens=UpperCAmelCase_ ) _lowercase : List[str] = tokenizer.encode("""multi-sequence build""" ,add_special_tokens=UpperCAmelCase_ ) _lowercase : Optional[int] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ ) _lowercase : Dict = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ ,UpperCAmelCase_ ) assert encoded_sentence == text + [4, 3] assert encoded_pair == text + [4] + text_a + [4, 3] @slow def lowerCamelCase__ ( self ): # fmt: off _lowercase : Union[str, Any] = {"""input_ids""": [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase_ ,model_name="""xlnet-base-cased""" ,revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" ,)
336
1
"""simple docstring""" def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase : list[list[int]] = [[0 for _ in range(__UpperCAmelCase )] for _ in range(m + 1 )] for i in range(m + 1 ): _lowercase : str = 1 for n in range(m + 1 ): for k in range(1 , __UpperCAmelCase ): memo[n][k] += memo[n][k - 1] if n - k > 0: memo[n][k] += memo[n - k - 1][k] return memo[m][m - 1] if __name__ == "__main__": import sys if len(sys.argv) == 1: try: UpperCAmelCase: List[str] = int(input("""Enter a number: """).strip()) print(partition(n)) except ValueError: print("""Please enter a number.""") else: try: UpperCAmelCase: Any = int(sys.argv[1]) print(partition(n)) except ValueError: print("""Please pass a number.""")
336
"""simple docstring""" def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): return " ".join(input_str.split()[::-1] ) if __name__ == "__main__": import doctest doctest.testmod()
336
1
"""simple docstring""" import gc import unittest from diffusers import FlaxStableDiffusionInpaintPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class UpperCamelCase ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() def lowerCamelCase__ ( self ): _lowercase : int = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-inpaint/init_image.png""" ) _lowercase : List[str] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" ) _lowercase : Optional[Any] = """xvjiarui/stable-diffusion-2-inpainting""" _lowercase , _lowercase : Any = FlaxStableDiffusionInpaintPipeline.from_pretrained(UpperCAmelCase_ ,safety_checker=UpperCAmelCase_ ) _lowercase : Optional[int] = """Face of a yellow cat, high resolution, sitting on a park bench""" _lowercase : Optional[Any] = jax.random.PRNGKey(0 ) _lowercase : Tuple = 50 _lowercase : List[str] = jax.device_count() _lowercase : List[str] = num_samples * [prompt] _lowercase : List[Any] = num_samples * [init_image] _lowercase : List[Any] = num_samples * [mask_image] _lowercase , _lowercase , _lowercase : Dict = pipeline.prepare_inputs(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ) # shard inputs and rng _lowercase : int = replicate(UpperCAmelCase_ ) _lowercase : Optional[Any] = jax.random.split(UpperCAmelCase_ ,jax.device_count() ) _lowercase : int = shard(UpperCAmelCase_ ) _lowercase : List[str] = shard(UpperCAmelCase_ ) _lowercase : str = shard(UpperCAmelCase_ ) _lowercase : Optional[Any] = pipeline( UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,jit=UpperCAmelCase_ ) _lowercase : Dict = output.images.reshape(UpperCAmelCase_ ,5_12 ,5_12 ,3 ) _lowercase : Optional[int] = images[0, 2_53:2_56, 2_53:2_56, -1] _lowercase : Optional[Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) _lowercase : List[Any] = jnp.array( [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
336
"""simple docstring""" import inspect import re from hashlib import shaaaa from typing import Dict, List from .arrow import arrow from .audiofolder import audiofolder from .csv import csv from .imagefolder import imagefolder from .json import json from .pandas import pandas from .parquet import parquet from .sql import sql # noqa F401 from .text import text def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase : int = [] for line in lines: _lowercase : Dict = re.sub(R"""#.*""" , """""" , __UpperCAmelCase ) # remove comments if line: filtered_lines.append(__UpperCAmelCase ) _lowercase : Tuple = """\n""".join(__UpperCAmelCase ) # Make a hash from all this code _lowercase : Tuple = full_str.encode("""utf-8""" ) return shaaaa(__UpperCAmelCase ).hexdigest() # get importable module names and hash for caching UpperCAmelCase: Tuple = { """csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())), """json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())), """pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())), """parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())), """arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())), """text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())), """imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())), """audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())), } # Used to infer the module to use based on the data files extensions UpperCAmelCase: List[str] = { """.csv""": ("""csv""", {}), """.tsv""": ("""csv""", {"""sep""": """\t"""}), """.json""": ("""json""", {}), """.jsonl""": ("""json""", {}), """.parquet""": ("""parquet""", {}), """.arrow""": ("""arrow""", {}), """.txt""": ("""text""", {}), } _EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) UpperCAmelCase: Any = {"""imagefolder""", """audiofolder"""} # Used to filter data files based on extensions given a module name UpperCAmelCase: Dict[str, List[str]] = {} for _ext, (_module, _) in _EXTENSION_TO_MODULE.items(): _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext) _MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""") _MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
336
1
"""simple docstring""" UpperCAmelCase: List[Any] = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)] def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase : Dict = 0 while number: # Increased Speed Slightly by checking every 5 digits together. sum_of_digits_squared += DIGITS_SQUARED[number % 100000] number //= 100000 return sum_of_digits_squared # There are 2 Chains made, # One ends with 89 with the chain member 58 being the one which when declared first, # there will be the least number of iterations for all the members to be checked. # The other one ends with 1 and has only one element 1. # So 58 and 1 are chosen to be declared at the starting. # Changed dictionary to an array to quicken the solution UpperCAmelCase: list[bool | None] = [None] * 10_000_000 UpperCAmelCase: int = True UpperCAmelCase: Any = False def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): if CHAINS[number - 1] is not None: return CHAINS[number - 1] # type: ignore _lowercase : Any = chain(next_number(__UpperCAmelCase ) ) _lowercase : Dict = number_chain while number < 10000000: _lowercase : str = number_chain number *= 10 return number_chain def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase = 10000000 ): for i in range(1 , __UpperCAmelCase ): if CHAINS[i] is None: chain(i + 1 ) return CHAINS[:number].count(__UpperCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod() print(F'{solution() = }')
336
"""simple docstring""" from .configuration_bert_masked import MaskedBertConfig from .modeling_bert_masked import ( MaskedBertForMultipleChoice, MaskedBertForQuestionAnswering, MaskedBertForSequenceClassification, MaskedBertForTokenClassification, MaskedBertModel, ) from .modules import *
336
1
"""simple docstring""" import argparse from collections import defaultdict def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): _lowercase : str = F"""{file}_{class_name}_{test_name}""" done_test[_id] += 1 with open(__UpperCAmelCase , """r""" ) as f: _lowercase : Any = f.readlines() _lowercase : Optional[int] = F"""class {class_name}(""" _lowercase : List[str] = F"""{4 * " "}def {test_name}(""" _lowercase : List[Any] = F"""{8 * " "}{correct_line.split()[0]}""" _lowercase : int = F"""{16 * " "}{correct_line.split()[0]}""" _lowercase : str = False _lowercase : Optional[Any] = False _lowercase : Union[str, Any] = False _lowercase : Any = False _lowercase : int = 0 _lowercase : Tuple = 0 _lowercase : Union[str, Any] = [] for line in lines: if line.startswith(__UpperCAmelCase ): _lowercase : List[str] = True elif in_class and line.startswith(__UpperCAmelCase ): _lowercase : str = True elif in_class and in_func and (line.startswith(__UpperCAmelCase ) or line.startswith(__UpperCAmelCase )): _lowercase : Union[str, Any] = len(line.split(correct_line.split()[0] )[0] ) count += 1 if count == done_test[_id]: _lowercase : Optional[int] = True if in_class and in_func and in_line: if ")" not in line: continue else: _lowercase : Optional[Any] = True if in_class and in_func and in_line and insert_line: new_lines.append(F"""{spaces * " "}{correct_line}""" ) _lowercase : Union[str, Any] = False else: new_lines.append(__UpperCAmelCase ) with open(__UpperCAmelCase , """w""" ) as f: for line in new_lines: f.write(__UpperCAmelCase ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase=None ): if fail is not None: with open(__UpperCAmelCase , """r""" ) as f: _lowercase : Dict = {l.strip() for l in f.readlines()} else: _lowercase : int = None with open(__UpperCAmelCase , """r""" ) as f: _lowercase : int = f.readlines() _lowercase : int = defaultdict(__UpperCAmelCase ) for line in correct_lines: _lowercase , _lowercase , _lowercase , _lowercase : int = line.split(""";""" ) if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures: overwrite_file(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) if __name__ == "__main__": UpperCAmelCase: List[Any] = argparse.ArgumentParser() parser.add_argument("""--correct_filename""", help="""filename of tests with expected result""") parser.add_argument("""--fail_filename""", help="""filename of test failures""", type=str, default=None) UpperCAmelCase: Any = parser.parse_args() main(args.correct_filename, args.fail_filename)
336
"""simple docstring""" def __SCREAMING_SNAKE_CASE ( ): return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )] UpperCAmelCase: Any = generate_large_matrix() UpperCAmelCase: Dict = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): assert all(row == sorted(__UpperCAmelCase , reverse=__UpperCAmelCase ) for row in grid ) assert all(list(__UpperCAmelCase ) == sorted(__UpperCAmelCase , reverse=__UpperCAmelCase ) for col in zip(*__UpperCAmelCase ) ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase : Tuple = 0 _lowercase : List[Any] = len(__UpperCAmelCase ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: _lowercase : Tuple = (left + right) // 2 _lowercase : List[Any] = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: _lowercase : Dict = mid + 1 else: _lowercase : Dict = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. return len(__UpperCAmelCase ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase : Any = 0 _lowercase : Optional[int] = len(grid[0] ) for i in range(len(__UpperCAmelCase ) ): _lowercase : Union[str, Any] = find_negative_index(grid[i][:bound] ) total += bound return (len(__UpperCAmelCase ) * len(grid[0] )) - total def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): return len([number for row in grid for number in row if number < 0] ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase : Tuple = 0 for row in grid: for i, number in enumerate(__UpperCAmelCase ): if number < 0: total += len(__UpperCAmelCase ) - i break return total def __SCREAMING_SNAKE_CASE ( ): from timeit import timeit print("""Running benchmarks""" ) _lowercase : Tuple = ( """from __main__ import count_negatives_binary_search, """ """count_negatives_brute_force, count_negatives_brute_force_with_break, grid""" ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): _lowercase : Dict = timeit(F"""{func}(grid=grid)""" , setup=__UpperCAmelCase , number=500 ) print(F"""{func}() took {time:0.4f} seconds""" ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
336
1
"""simple docstring""" import argparse import json import pickle from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase: str = logging.get_logger(__name__) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase : Dict = SwinConfig.from_pretrained( """microsoft/swin-tiny-patch4-window7-224""" , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ) _lowercase : List[str] = MaskFormerConfig(backbone_config=__UpperCAmelCase ) _lowercase : str = """huggingface/label-files""" if "ade20k-full" in model_name: # this should be ok _lowercase : str = 847 _lowercase : str = """maskformer-ade20k-full-id2label.json""" elif "ade" in model_name: # this should be ok _lowercase : Dict = 150 _lowercase : Union[str, Any] = """ade20k-id2label.json""" elif "coco-stuff" in model_name: # this should be ok _lowercase : Dict = 171 _lowercase : str = """maskformer-coco-stuff-id2label.json""" elif "coco" in model_name: # TODO _lowercase : str = 133 _lowercase : Union[str, Any] = """coco-panoptic-id2label.json""" elif "cityscapes" in model_name: # this should be ok _lowercase : Dict = 19 _lowercase : Any = """cityscapes-id2label.json""" elif "vistas" in model_name: # this should be ok _lowercase : str = 65 _lowercase : List[str] = """mapillary-vistas-id2label.json""" _lowercase : Dict = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type="""dataset""" ) , """r""" ) ) _lowercase : Optional[int] = {int(__UpperCAmelCase ): v for k, v in idalabel.items()} return config def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase : Dict = [] # stem # fmt: off rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") ) rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") ) rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") ) 
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") ) if i < 3: rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") ) rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") ) rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") ) rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") ) rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") ) # FPN rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") ) rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") ) rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") ) for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ): rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") ) rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") ) rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") ) rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") ) rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") ) rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") ) rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") ) rename_keys.append(("""sem_seg_head.mask_features.bias""", 
"""model.pixel_level_module.decoder.mask_projection.bias""") ) # Transformer decoder for idx in range(config.decoder_config.decoder_layers ): # self-attention out projection rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") ) # cross-attention out projection rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") ) # MLP 1 rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") ) # MLP 2 rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") ) # layernorm 1 (self-attention layernorm) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") ) # layernorm 2 (cross-attention layernorm) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") ) # layernorm 3 (final layernorm) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") ) rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") ) rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") ) # heads on top rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") ) rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") ) rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") ) 
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") ) rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") ) for i in range(3 ): rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") ) # fmt: on return rename_keys def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): _lowercase : Any = dct.pop(__UpperCAmelCase ) _lowercase : Any = val def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ): _lowercase : Optional[int] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): _lowercase : int = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) _lowercase : List[str] = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" ) _lowercase : int = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict _lowercase : Optional[int] = in_proj_weight[:dim, :] _lowercase : Optional[Any] = in_proj_bias[: dim] _lowercase : Tuple = in_proj_weight[ dim : dim * 2, : ] _lowercase : List[str] = in_proj_bias[ dim : dim * 2 ] _lowercase : List[Any] = in_proj_weight[ -dim :, : ] _lowercase : int = in_proj_bias[-dim :] # fmt: on def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ): # fmt: off _lowercase : List[Any] = config.decoder_config.hidden_size for idx in range(config.decoder_config.decoder_layers ): # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias) _lowercase : Union[str, Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" ) _lowercase : Dict = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict _lowercase : Dict = in_proj_weight[: hidden_size, :] _lowercase : Optional[Any] = in_proj_bias[:config.hidden_size] _lowercase : Optional[int] = in_proj_weight[hidden_size : hidden_size * 2, :] _lowercase : Union[str, Any] = in_proj_bias[hidden_size : hidden_size * 2] _lowercase : Any = in_proj_weight[-hidden_size :, :] _lowercase : str = in_proj_bias[-hidden_size :] # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias) _lowercase : Union[str, Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" ) _lowercase : List[Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict _lowercase : str = in_proj_weight[: hidden_size, :] _lowercase : str = in_proj_bias[:config.hidden_size] _lowercase : Any = in_proj_weight[hidden_size : hidden_size * 2, :] _lowercase : str = in_proj_bias[hidden_size : hidden_size * 2] _lowercase : Union[str, Any] = in_proj_weight[-hidden_size :, :] _lowercase : int = in_proj_bias[-hidden_size :] # fmt: on def __SCREAMING_SNAKE_CASE ( ): _lowercase : Dict = 
"""http://images.cocodataset.org/val2017/000000039769.jpg""" _lowercase : Union[str, Any] = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw ) return im @torch.no_grad() def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = False ): _lowercase : Union[str, Any] = get_maskformer_config(__UpperCAmelCase ) # load original state_dict with open(__UpperCAmelCase , """rb""" ) as f: _lowercase : Any = pickle.load(__UpperCAmelCase ) _lowercase : str = data["""model"""] # for name, param in state_dict.items(): # print(name, param.shape) # rename keys _lowercase : Optional[Any] = create_rename_keys(__UpperCAmelCase ) for src, dest in rename_keys: rename_key(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) read_in_swin_q_k_v(__UpperCAmelCase , config.backbone_config ) read_in_decoder_q_k_v(__UpperCAmelCase , __UpperCAmelCase ) # update to torch tensors for key, value in state_dict.items(): _lowercase : Union[str, Any] = torch.from_numpy(__UpperCAmelCase ) # load 🤗 model _lowercase : str = MaskFormerForInstanceSegmentation(__UpperCAmelCase ) model.eval() for name, param in model.named_parameters(): print(__UpperCAmelCase , param.shape ) _lowercase , _lowercase : List[str] = model.load_state_dict(__UpperCAmelCase , strict=__UpperCAmelCase ) assert missing_keys == [ "model.pixel_level_module.encoder.model.layernorm.weight", "model.pixel_level_module.encoder.model.layernorm.bias", ] assert len(__UpperCAmelCase ) == 0, F"""Unexpected keys: {unexpected_keys}""" # verify results _lowercase : Tuple = prepare_img() if "vistas" in model_name: _lowercase : int = 65 elif "cityscapes" in model_name: _lowercase : List[str] = 65535 else: _lowercase : Any = 255 _lowercase : List[str] = True if """ade""" in model_name else False _lowercase : int = MaskFormerImageProcessor(ignore_index=__UpperCAmelCase , reduce_labels=__UpperCAmelCase ) _lowercase : Optional[int] = image_processor(__UpperCAmelCase , return_tensors="""pt""" ) _lowercase : Union[str, Any] = model(**__UpperCAmelCase ) print("""Logits:""" , outputs.class_queries_logits[0, :3, :3] ) if model_name == "maskformer-swin-tiny-ade": _lowercase : Union[str, Any] = torch.tensor( [[3.6_3_5_3, -4.4_7_7_0, -2.6_0_6_5], [0.5_0_8_1, -4.2_3_9_4, -3.5_3_4_3], [2.1_9_0_9, -5.0_3_5_3, -1.9_3_2_3]] ) assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , __UpperCAmelCase , atol=1E-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" ) Path(__UpperCAmelCase ).mkdir(exist_ok=__UpperCAmelCase ) model.save_pretrained(__UpperCAmelCase ) image_processor.save_pretrained(__UpperCAmelCase ) if push_to_hub: print("""Pushing model and image processor to the hub...""" ) model.push_to_hub(F"""nielsr/{model_name}""" ) image_processor.push_to_hub(F"""nielsr/{model_name}""" ) if __name__ == "__main__": UpperCAmelCase: str = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""maskformer-swin-tiny-ade""", type=str, help=("""Name of the MaskFormer model you'd like to convert""",), ) parser.add_argument( """--checkpoint_path""", default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""", type=str, help="""Path to the original state dict (.pth file).""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( 
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) UpperCAmelCase: Optional[int] = parser.parse_args() convert_maskformer_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
336
"""simple docstring""" import re from filelock import FileLock try: import nltk UpperCAmelCase: List[str] = True except (ImportError, ModuleNotFoundError): UpperCAmelCase: int = False if NLTK_AVAILABLE: with FileLock(""".lock""") as lock: nltk.download("""punkt""", quiet=True) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): re.sub("""<n>""" , """""" , __UpperCAmelCase ) # remove pegasus newline char assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)" return "\n".join(nltk.sent_tokenize(__UpperCAmelCase ) )
336
1
"""simple docstring""" import os from typing import List, Optional, Union from ...image_processing_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType from ..auto import AutoTokenizer class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = ["image_processor", "tokenizer"] SCREAMING_SNAKE_CASE_ : Optional[int] = "BlipImageProcessor" SCREAMING_SNAKE_CASE_ : Tuple = "AutoTokenizer" def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ): super().__init__(UpperCAmelCase_ ,UpperCAmelCase_ ) # add QFormer tokenizer _lowercase : Optional[int] = qformer_tokenizer def __call__( self ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = True ,UpperCAmelCase_ = False ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = 0 ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,): if images is None and text is None: raise ValueError("""You have to specify at least images or text.""" ) _lowercase : List[Any] = BatchFeature() if text is not None: _lowercase : List[str] = self.tokenizer( text=UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,max_length=UpperCAmelCase_ ,stride=UpperCAmelCase_ ,pad_to_multiple_of=UpperCAmelCase_ ,return_attention_mask=UpperCAmelCase_ ,return_overflowing_tokens=UpperCAmelCase_ ,return_special_tokens_mask=UpperCAmelCase_ ,return_offsets_mapping=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ ,return_length=UpperCAmelCase_ ,verbose=UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ,**UpperCAmelCase_ ,) encoding.update(UpperCAmelCase_ ) _lowercase : Dict = self.qformer_tokenizer( text=UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,max_length=UpperCAmelCase_ ,stride=UpperCAmelCase_ ,pad_to_multiple_of=UpperCAmelCase_ ,return_attention_mask=UpperCAmelCase_ ,return_overflowing_tokens=UpperCAmelCase_ ,return_special_tokens_mask=UpperCAmelCase_ ,return_offsets_mapping=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ ,return_length=UpperCAmelCase_ ,verbose=UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ,**UpperCAmelCase_ ,) _lowercase : str = qformer_text_encoding.pop("""input_ids""" ) _lowercase : int = qformer_text_encoding.pop("""attention_mask""" ) if images is not None: _lowercase : Optional[Any] = self.image_processor(UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ) encoding.update(UpperCAmelCase_ ) return encoding def lowerCamelCase__ ( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): return self.tokenizer.batch_decode(*UpperCAmelCase_ ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): return self.tokenizer.decode(*UpperCAmelCase_ ,**UpperCAmelCase_ ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def lowerCamelCase__ ( self ): _lowercase : Union[str, Any] = self.tokenizer.model_input_names _lowercase : int = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,**UpperCAmelCase_ ): if 
os.path.isfile(UpperCAmelCase_ ): raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" ) os.makedirs(UpperCAmelCase_ ,exist_ok=UpperCAmelCase_ ) _lowercase : Any = os.path.join(UpperCAmelCase_ ,"""qformer_tokenizer""" ) self.qformer_tokenizer.save_pretrained(UpperCAmelCase_ ) return super().save_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ ) @classmethod def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ): _lowercase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCAmelCase_ ,subfolder="""qformer_tokenizer""" ) _lowercase : Any = cls._get_arguments_from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ ) args.append(UpperCAmelCase_ ) return cls(*UpperCAmelCase_ )
336
"""simple docstring""" import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase=0.9_9_9 , __UpperCAmelCase="cosine" , ): if alpha_transform_type == "cosine": def alpha_bar_fn(__UpperCAmelCase ): return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(__UpperCAmelCase ): return math.exp(t * -1_2.0 ) else: raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) _lowercase : str = [] for i in range(__UpperCAmelCase ): _lowercase : Any = i / num_diffusion_timesteps _lowercase : int = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(__UpperCAmelCase ) / alpha_bar_fn(__UpperCAmelCase ) , __UpperCAmelCase ) ) return torch.tensor(__UpperCAmelCase , dtype=torch.floataa ) class UpperCamelCase ( snake_case , snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = [e.name for e in KarrasDiffusionSchedulers] SCREAMING_SNAKE_CASE_ : str = 2 @register_to_config def __init__( self ,UpperCAmelCase_ = 10_00 ,UpperCAmelCase_ = 0.00085 ,UpperCAmelCase_ = 0.012 ,UpperCAmelCase_ = "linear" ,UpperCAmelCase_ = None ,UpperCAmelCase_ = "epsilon" ,UpperCAmelCase_ = "linspace" ,UpperCAmelCase_ = 0 ,): if trained_betas is not None: _lowercase : str = torch.tensor(UpperCAmelCase_ ,dtype=torch.floataa ) elif beta_schedule == "linear": _lowercase : Optional[Any] = torch.linspace(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. _lowercase : Any = ( torch.linspace(beta_start**0.5 ,beta_end**0.5 ,UpperCAmelCase_ ,dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule _lowercase : Any = betas_for_alpha_bar(UpperCAmelCase_ ) else: raise NotImplementedError(f"""{beta_schedule} does is not implemented for {self.__class__}""" ) _lowercase : Tuple = 1.0 - self.betas _lowercase : Dict = torch.cumprod(self.alphas ,dim=0 ) # set all values self.set_timesteps(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_=None ): if schedule_timesteps is None: _lowercase : Optional[int] = self.timesteps _lowercase : Union[str, Any] = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter ) == 0: _lowercase : Optional[Any] = 1 if len(UpperCAmelCase_ ) > 1 else 0 else: _lowercase : Dict = timestep.cpu().item() if torch.is_tensor(UpperCAmelCase_ ) else timestep _lowercase : List[str] = self._index_counter[timestep_int] return indices[pos].item() @property def lowerCamelCase__ ( self ): # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,): _lowercase : str = self.index_for_timestep(UpperCAmelCase_ ) if self.state_in_first_order: _lowercase : Optional[Any] = self.sigmas[step_index] else: _lowercase : Dict = self.sigmas_interpol[step_index] _lowercase : Optional[int] = sample / ((sigma**2 + 1) ** 0.5) return sample def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,): _lowercase : List[str] = num_inference_steps _lowercase : Dict = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": _lowercase : Dict = np.linspace(0 ,num_train_timesteps - 1 ,UpperCAmelCase_ ,dtype=UpperCAmelCase_ )[::-1].copy() elif self.config.timestep_spacing == "leading": _lowercase : Union[str, Any] = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 _lowercase : str = (np.arange(0 ,UpperCAmelCase_ ) * step_ratio).round()[::-1].copy().astype(UpperCAmelCase_ ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": _lowercase : str = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 _lowercase : Optional[int] = (np.arange(UpperCAmelCase_ ,0 ,-step_ratio )).round().copy().astype(UpperCAmelCase_ ) timesteps -= 1 else: raise ValueError( f"""{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" ) _lowercase : Union[str, Any] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) _lowercase : Optional[Any] = torch.from_numpy(np.log(UpperCAmelCase_ ) ).to(UpperCAmelCase_ ) _lowercase : List[str] = np.interp(UpperCAmelCase_ ,np.arange(0 ,len(UpperCAmelCase_ ) ) ,UpperCAmelCase_ ) _lowercase : int = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) _lowercase : Any = torch.from_numpy(UpperCAmelCase_ ).to(device=UpperCAmelCase_ ) # interpolate sigmas _lowercase : List[str] = sigmas.log().lerp(sigmas.roll(1 ).log() ,0.5 ).exp() _lowercase : Union[str, Any] = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] ) _lowercase : Tuple = torch.cat( [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] ) if str(UpperCAmelCase_ ).startswith("""mps""" ): # mps does not support float64 _lowercase : Tuple = torch.from_numpy(UpperCAmelCase_ ).to(UpperCAmelCase_ ,dtype=torch.floataa ) else: _lowercase : str = torch.from_numpy(UpperCAmelCase_ ).to(UpperCAmelCase_ ) # interpolate timesteps _lowercase : int = self.sigma_to_t(UpperCAmelCase_ ).to(UpperCAmelCase_ ,dtype=timesteps.dtype ) _lowercase : Union[str, Any] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) ,dim=-1 ).flatten() _lowercase : str = torch.cat([timesteps[:1], interleaved_timesteps] ) _lowercase : List[Any] = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter _lowercase : Optional[Any] = defaultdict(UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ): # get log sigma _lowercase : Optional[Any] = sigma.log() # get distribution _lowercase : Optional[int] = log_sigma - self.log_sigmas[:, None] # get sigmas range _lowercase : Tuple = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 ) _lowercase : List[Any] = low_idx + 1 _lowercase : int = self.log_sigmas[low_idx] _lowercase : Any = self.log_sigmas[high_idx] # interpolate sigmas _lowercase : Any = (low - log_sigma) / (low - high) _lowercase : Dict = w.clamp(0 ,1 ) # transform interpolation to time range _lowercase : List[str] = (1 - w) * low_idx + w * high_idx _lowercase : Optional[int] = t.view(sigma.shape ) return t @property def lowerCamelCase__ ( self ): return self.sample is None def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = True ,): _lowercase : Optional[int] = self.index_for_timestep(UpperCAmelCase_ ) # advance index counter by 1 _lowercase : str = timestep.cpu().item() if torch.is_tensor(UpperCAmelCase_ ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: _lowercase : Any = self.sigmas[step_index] _lowercase : Any = self.sigmas_interpol[step_index + 1] _lowercase : Tuple = self.sigmas[step_index + 1] else: # 2nd order / KDPM2's method _lowercase : Union[str, Any] = self.sigmas[step_index - 1] _lowercase : int = self.sigmas_interpol[step_index] _lowercase : Tuple = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API _lowercase : Any = 0 _lowercase : int = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. 
compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": _lowercase : Union[str, Any] = sigma_hat if self.state_in_first_order else sigma_interpol _lowercase : Optional[Any] = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": _lowercase : str = sigma_hat if self.state_in_first_order else sigma_interpol _lowercase : List[str] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": raise NotImplementedError("""prediction_type not implemented yet: sample""" ) else: raise ValueError( f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order _lowercase : List[str] = (sample - pred_original_sample) / sigma_hat # 3. delta timestep _lowercase : Any = sigma_interpol - sigma_hat # store for 2nd order step _lowercase : List[Any] = sample else: # DPM-Solver-2 # 2. Convert to an ODE derivative for 2nd order _lowercase : Optional[Any] = (sample - pred_original_sample) / sigma_interpol # 3. delta timestep _lowercase : Optional[Any] = sigma_next - sigma_hat _lowercase : Any = self.sample _lowercase : Optional[int] = None _lowercase : str = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,): # Make sure sigmas and timesteps have the same device and dtype as original_samples _lowercase : int = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(UpperCAmelCase_ ): # mps does not support float64 _lowercase : str = self.timesteps.to(original_samples.device ,dtype=torch.floataa ) _lowercase : Any = timesteps.to(original_samples.device ,dtype=torch.floataa ) else: _lowercase : List[Any] = self.timesteps.to(original_samples.device ) _lowercase : Union[str, Any] = timesteps.to(original_samples.device ) _lowercase : List[Any] = [self.index_for_timestep(UpperCAmelCase_ ,UpperCAmelCase_ ) for t in timesteps] _lowercase : Optional[Any] = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): _lowercase : List[Any] = sigma.unsqueeze(-1 ) _lowercase : int = original_samples + noise * sigma return noisy_samples def __len__( self ): return self.config.num_train_timesteps
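# Hedged usage sketch for the scheduler above. In the upstream diffusers
# library this class is exported as KDPM2DiscreteScheduler, and the mangled
# method names correspond to set_timesteps, scale_model_input and step from
# the SchedulerMixin convention; treat those names, and the zero-predicting
# stand-in model below, as assumptions rather than verbatim API of this file.
import torch


def denoise(scheduler, model, shape=(1, 3, 8, 8), num_inference_steps=10):
    scheduler.set_timesteps(num_inference_steps)
    # Scale the initial latents by the scheduler's starting sigma.
    sample = torch.randn(shape) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        # Divides by (sigma**2 + 1) ** 0.5, as in scale_model_input above.
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = model(model_input, t)  # an epsilon prediction
        # Alternates between the 1st-order update and KDPM2's 2nd-order step.
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample


# e.g. denoise(KDPM2DiscreteScheduler(), lambda x, t: torch.zeros_like(x))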
"""simple docstring""" def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): if number > 0: raise ValueError("""input must be a negative integer""" ) _lowercase : List[Any] = len(bin(__UpperCAmelCase )[3:] ) _lowercase : Optional[Any] = bin(abs(__UpperCAmelCase ) - (1 << binary_number_length) )[3:] _lowercase : List[Any] = ( ( """1""" + """0""" * (binary_number_length - len(__UpperCAmelCase )) + twos_complement_number ) if number < 0 else """0""" ) return "0b" + twos_complement_number if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import pprint import requests UpperCAmelCase: Tuple = """https://zenquotes.io/api""" def __SCREAMING_SNAKE_CASE ( ): return requests.get(API_ENDPOINT_URL + """/today""" ).json() def __SCREAMING_SNAKE_CASE ( ): return requests.get(API_ENDPOINT_URL + """/random""" ).json() if __name__ == "__main__": UpperCAmelCase: int = random_quotes() pprint.pprint(response)
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( UniSpeechConfig, UniSpeechForCTC, UniSpeechForPreTraining, WavaVecaFeatureExtractor, WavaVecaPhonemeCTCTokenizer, WavaVecaProcessor, logging, ) logging.set_verbosity_info() UpperCAmelCase: int = logging.get_logger(__name__) UpperCAmelCase: Optional[Any] = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """ctc_proj""", """mask_emb""": """masked_spec_embed""", } UpperCAmelCase: Dict = [ """ctc_proj""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", ] def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): for attribute in key.split(""".""" ): if is_finetuned: if attribute in ["quantizer", "project_q", "project_hid"]: # those layers are only relevant for pretraining and should be dropped return if attribute == "ctc_proj": # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models _lowercase : Dict = """lm_head""" _lowercase : List[Any] = getattr(__UpperCAmelCase , __UpperCAmelCase ) if weight_type is not None: _lowercase : str = getattr(__UpperCAmelCase , __UpperCAmelCase ).shape else: _lowercase : List[str] = hf_pointer.shape assert hf_shape == value.shape, ( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": _lowercase : str = value elif weight_type == "weight_g": _lowercase : Tuple = value elif weight_type == "weight_v": _lowercase : Optional[Any] = value elif weight_type == "bias": _lowercase : Optional[int] = value else: _lowercase : int = value logger.info(F"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): _lowercase : Optional[Any] = [] _lowercase : Any = fairseq_model.state_dict() _lowercase : Optional[Any] = hf_model.unispeech.feature_extractor for name, value in fairseq_dict.items(): _lowercase : Any = False if "conv_layers" in name: load_conv_layer( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , hf_model.config.feat_extract_norm == """group""" , ) _lowercase : Optional[int] = True else: for key, mapped_key in MAPPING.items(): _lowercase : Dict = """unispeech.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: _lowercase : Optional[Any] = True if "*" in mapped_key: _lowercase : str = name.split(__UpperCAmelCase )[0].split(""".""" )[-2] _lowercase : Any = mapped_key.replace("""*""" , __UpperCAmelCase ) if "weight_g" in name: _lowercase : Tuple = """weight_g""" elif "weight_v" in name: _lowercase : Union[str, Any] = """weight_v""" elif "bias" in name: _lowercase : Dict = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj _lowercase : Any = """weight""" else: _lowercase : Optional[int] = None set_recursively(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) continue if not is_used: unused_weights.append(__UpperCAmelCase ) logger.warning(F"""Unused weights: {unused_weights}""" ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): _lowercase : Dict = full_name.split("""conv_layers.""" )[-1] _lowercase : List[str] = name.split(""".""" ) _lowercase : Optional[Any] = int(items[0] ) _lowercase : int = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) _lowercase : int = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) _lowercase : Union[str, Any] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) _lowercase : Optional[int] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) _lowercase : Optional[int] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(__UpperCAmelCase ) @torch.no_grad() def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True ): if config_path is not None: _lowercase : List[Any] = UniSpeechConfig.from_pretrained(__UpperCAmelCase ) else: _lowercase : Dict = UniSpeechConfig() if is_finetuned: if dict_path: _lowercase : int = Dictionary.load_from_json(__UpperCAmelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _lowercase : int = target_dict.pad_index _lowercase : Any = target_dict.bos_index _lowercase : Optional[int] = target_dict.eos_index _lowercase : Optional[int] = len(target_dict.symbols ) _lowercase : Dict = os.path.join(__UpperCAmelCase , """vocab.json""" ) if not os.path.isdir(__UpperCAmelCase ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__UpperCAmelCase ) ) return os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase ) _lowercase : List[str] = target_dict.indices # fairseq has the <pad> and <s> switched _lowercase : Any = 42 _lowercase : List[Any] = 43 with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(__UpperCAmelCase , __UpperCAmelCase ) _lowercase : Optional[int] = WavaVecaPhonemeCTCTokenizer( __UpperCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=__UpperCAmelCase , ) _lowercase : Optional[Any] = True if config.feat_extract_norm == """layer""" else False _lowercase : int = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , ) _lowercase : List[str] = WavaVecaProcessor(feature_extractor=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) processor.save_pretrained(__UpperCAmelCase ) _lowercase : Dict = UniSpeechForCTC(__UpperCAmelCase ) else: _lowercase : Dict = UniSpeechForPreTraining(__UpperCAmelCase ) if is_finetuned: _lowercase , _lowercase , _lowercase : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] ), """w2v_path""": checkpoint_path} ) else: _lowercase , _lowercase , _lowercase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) _lowercase : List[Any] = model[0].eval() recursively_load_weights(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) hf_unispeech.save_pretrained(__UpperCAmelCase ) if __name__ == "__main__": UpperCAmelCase: Union[str, Any] = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, 
help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) UpperCAmelCase: str = parser.parse_args() convert_unispeech_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
"""simple docstring""" from __future__ import annotations from typing import TypedDict class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : str SCREAMING_SNAKE_CASE_ : int def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise TypeError("""The parameter s type must be str.""" ) return [s[i:] + s[:i] for i in range(len(__UpperCAmelCase ) )] def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise TypeError("""The parameter s type must be str.""" ) if not s: raise ValueError("""The parameter s must not be empty.""" ) _lowercase : Tuple = all_rotations(__UpperCAmelCase ) rotations.sort() # sort the list of rotations in alphabetically order # make a string composed of the last char of each rotation _lowercase : BWTTransformDict = { "bwt_string": "".join([word[-1] for word in rotations] ), "idx_original_string": rotations.index(__UpperCAmelCase ), } return response def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ): if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise TypeError("""The parameter bwt_string type must be str.""" ) if not bwt_string: raise ValueError("""The parameter bwt_string must not be empty.""" ) try: _lowercase : Optional[Any] = int(__UpperCAmelCase ) except ValueError: raise TypeError( """The parameter idx_original_string type must be int or passive""" """ of cast to int.""" ) if idx_original_string < 0: raise ValueError("""The parameter idx_original_string must not be lower than 0.""" ) if idx_original_string >= len(__UpperCAmelCase ): raise ValueError( """The parameter idx_original_string must be lower than""" """ len(bwt_string).""" ) _lowercase : int = [""""""] * len(__UpperCAmelCase ) for _ in range(len(__UpperCAmelCase ) ): for i in range(len(__UpperCAmelCase ) ): _lowercase : Union[str, Any] = bwt_string[i] + ordered_rotations[i] ordered_rotations.sort() return ordered_rotations[idx_original_string] if __name__ == "__main__": UpperCAmelCase: Optional[int] = """Provide a string that I will generate its BWT transform: """ UpperCAmelCase: int = input(entry_msg).strip() UpperCAmelCase: List[str] = bwt_transform(s) print( F'Burrows Wheeler transform for string \'{s}\' results ' F'in \'{result["bwt_string"]}\'' ) UpperCAmelCase: Union[str, Any] = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""]) print( F'Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' ' F'we get original string \'{original_string}\'' )
"""simple docstring""" def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): if n == 0: return 1 elif n % 2 == 1: return (binary_exponentiation(__UpperCAmelCase , n - 1 , __UpperCAmelCase ) * a) % mod else: _lowercase : Dict = binary_exponentiation(__UpperCAmelCase , n / 2 , __UpperCAmelCase ) return (b * b) % mod # a prime number UpperCAmelCase: Optional[int] = 701 UpperCAmelCase: List[str] = 1_000_000_000 UpperCAmelCase: Union[str, Any] = 10 # using binary exponentiation function, O(log(p)): print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p) print((a / b) % p == (a * b ** (p - 2)) % p)
"""simple docstring""" from __future__ import annotations from itertools import permutations from random import randint from timeit import repeat def __SCREAMING_SNAKE_CASE ( ): _lowercase : Dict = [randint(-1000 , 1000 ) for i in range(10 )] _lowercase : Tuple = randint(-5000 , 5000 ) return (arr, r) UpperCAmelCase: int = make_dataset() def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ): for triplet in permutations(__UpperCAmelCase , 3 ): if sum(__UpperCAmelCase ) == target: return tuple(sorted(__UpperCAmelCase ) ) return (0, 0, 0) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ): arr.sort() _lowercase : Optional[Any] = len(__UpperCAmelCase ) for i in range(n - 1 ): _lowercase , _lowercase : str = i + 1, n - 1 while left < right: if arr[i] + arr[left] + arr[right] == target: return (arr[i], arr[left], arr[right]) elif arr[i] + arr[left] + arr[right] < target: left += 1 elif arr[i] + arr[left] + arr[right] > target: right -= 1 return (0, 0, 0) def __SCREAMING_SNAKE_CASE ( ): _lowercase : Tuple = """ from __main__ import dataset, triplet_sum1, triplet_sum2 """ _lowercase : Union[str, Any] = """ triplet_sum1(*dataset) """ _lowercase : Union[str, Any] = """ triplet_sum2(*dataset) """ _lowercase : Dict = repeat(setup=__UpperCAmelCase , stmt=__UpperCAmelCase , repeat=5 , number=10000 ) _lowercase : Any = repeat(setup=__UpperCAmelCase , stmt=__UpperCAmelCase , repeat=5 , number=10000 ) return (min(__UpperCAmelCase ), min(__UpperCAmelCase )) if __name__ == "__main__": from doctest import testmod testmod() UpperCAmelCase: Any = solution_times() print(F'The time for naive implementation is {times[0]}.') print(F'The time for optimized implementation is {times[1]}.')
"""simple docstring""" import inspect import unittest from transformers import SegformerConfig, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_MAPPING, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerModel, ) from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import SegformerImageProcessor class UpperCamelCase ( snake_case ): """simple docstring""" def lowerCamelCase__ ( self ): _lowercase : List[Any] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(UpperCAmelCase_ ,"""hidden_sizes""" ) ) self.parent.assertTrue(hasattr(UpperCAmelCase_ ,"""num_attention_heads""" ) ) self.parent.assertTrue(hasattr(UpperCAmelCase_ ,"""num_encoder_blocks""" ) ) class UpperCamelCase : """simple docstring""" def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_=13 ,UpperCAmelCase_=64 ,UpperCAmelCase_=3 ,UpperCAmelCase_=4 ,UpperCAmelCase_=[2, 2, 2, 2] ,UpperCAmelCase_=[8, 4, 2, 1] ,UpperCAmelCase_=[16, 32, 64, 1_28] ,UpperCAmelCase_=[1, 4, 8, 16] ,UpperCAmelCase_=[1, 2, 4, 8] ,UpperCAmelCase_=True ,UpperCAmelCase_=True ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=3 ,UpperCAmelCase_=None ,): _lowercase : int = parent _lowercase : Dict = batch_size _lowercase : Optional[int] = image_size _lowercase : List[Any] = num_channels _lowercase : Union[str, Any] = num_encoder_blocks _lowercase : Any = sr_ratios _lowercase : str = depths _lowercase : List[Any] = hidden_sizes _lowercase : str = downsampling_rates _lowercase : List[str] = num_attention_heads _lowercase : Optional[int] = is_training _lowercase : Optional[int] = use_labels _lowercase : Any = hidden_act _lowercase : Dict = hidden_dropout_prob _lowercase : Union[str, Any] = attention_probs_dropout_prob _lowercase : Any = initializer_range _lowercase : List[Any] = num_labels _lowercase : Union[str, Any] = scope def lowerCamelCase__ ( self ): _lowercase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowercase : Tuple = None if self.use_labels: _lowercase : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels ) _lowercase : Union[str, Any] = self.get_config() return config, pixel_values, labels def lowerCamelCase__ ( self ): return SegformerConfig( image_size=self.image_size ,num_channels=self.num_channels ,num_encoder_blocks=self.num_encoder_blocks ,depths=self.depths ,hidden_sizes=self.hidden_sizes ,num_attention_heads=self.num_attention_heads ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,initializer_range=self.initializer_range ,) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ): _lowercase : Union[str, Any] = SegformerModel(config=UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() _lowercase : Dict = model(UpperCAmelCase_ ) _lowercase : Tuple = self.image_size // (self.downsampling_rates[-1] * 2) self.parent.assertEqual( result.last_hidden_state.shape 
,(self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ): _lowercase : Optional[int] = self.num_labels _lowercase : int = SegformerForSemanticSegmentation(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() _lowercase : Any = model(UpperCAmelCase_ ) self.parent.assertEqual( result.logits.shape ,(self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) _lowercase : List[Any] = model(UpperCAmelCase_ ,labels=UpperCAmelCase_ ) self.parent.assertEqual( result.logits.shape ,(self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) self.parent.assertGreater(result.loss ,0.0 ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ): _lowercase : Optional[int] = 1 _lowercase : Optional[Any] = SegformerForSemanticSegmentation(config=UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() _lowercase : Dict = torch.randint(0 ,1 ,(self.batch_size, self.image_size, self.image_size) ).to(UpperCAmelCase_ ) _lowercase : List[Any] = model(UpperCAmelCase_ ,labels=UpperCAmelCase_ ) self.parent.assertGreater(result.loss ,0.0 ) def lowerCamelCase__ ( self ): _lowercase : str = self.prepare_config_and_inputs() _lowercase , _lowercase , _lowercase : Tuple = config_and_inputs _lowercase : List[str] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class UpperCamelCase ( snake_case , snake_case , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = ( ( SegformerModel, SegformerForSemanticSegmentation, SegformerForImageClassification, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE_ : Optional[int] = ( { "feature-extraction": SegformerModel, "image-classification": SegformerForImageClassification, "image-segmentation": SegformerForSemanticSegmentation, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ : Optional[Any] = True SCREAMING_SNAKE_CASE_ : Tuple = False SCREAMING_SNAKE_CASE_ : int = False SCREAMING_SNAKE_CASE_ : Any = False def lowerCamelCase__ ( self ): _lowercase : List[Any] = SegformerModelTester(self ) _lowercase : List[Any] = SegformerConfigTester(self ,config_class=UpperCAmelCase_ ) def lowerCamelCase__ ( self ): self.config_tester.run_common_tests() def lowerCamelCase__ ( self ): _lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase_ ) def lowerCamelCase__ ( self ): _lowercase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_binary_image_segmentation(*UpperCAmelCase_ ) def lowerCamelCase__ ( self ): _lowercase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_segmentation(*UpperCAmelCase_ ) @unittest.skip("""SegFormer does not use inputs_embeds""" ) def lowerCamelCase__ ( self ): pass @unittest.skip("""SegFormer does not have get_input_embeddings method and get_output_embeddings methods""" ) def lowerCamelCase__ ( self ): pass def lowerCamelCase__ ( self ): _lowercase , _lowercase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowercase : int = model_class(UpperCAmelCase_ ) _lowercase : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowercase : Union[str, Any] = [*signature.parameters.keys()] _lowercase : List[str] = ["""pixel_values"""] 
self.assertListEqual(arg_names[:1] ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ): _lowercase , _lowercase : int = self.model_tester.prepare_config_and_inputs_for_common() _lowercase : List[str] = True for model_class in self.all_model_classes: _lowercase : Any = True _lowercase : Optional[int] = False _lowercase : int = True _lowercase : Dict = model_class(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() with torch.no_grad(): _lowercase : Any = model(**self._prepare_for_class(UpperCAmelCase_ ,UpperCAmelCase_ ) ) _lowercase : Union[str, Any] = outputs.attentions _lowercase : List[str] = sum(self.model_tester.depths ) self.assertEqual(len(UpperCAmelCase_ ) ,UpperCAmelCase_ ) # check that output_attentions also work using config del inputs_dict["output_attentions"] _lowercase : Optional[Any] = True _lowercase : Tuple = model_class(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() with torch.no_grad(): _lowercase : str = model(**self._prepare_for_class(UpperCAmelCase_ ,UpperCAmelCase_ ) ) _lowercase : str = outputs.attentions self.assertEqual(len(UpperCAmelCase_ ) ,UpperCAmelCase_ ) # verify the first attentions (first block, first layer) _lowercase : Optional[Any] = (self.model_tester.image_size // 4) ** 2 _lowercase : List[Any] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] ,) # verify the last attentions (last block, last layer) _lowercase : List[Any] = (self.model_tester.image_size // 32) ** 2 _lowercase : int = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2 self.assertListEqual( list(attentions[-1].shape[-3:] ) ,[self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] ,) _lowercase : Optional[Any] = len(UpperCAmelCase_ ) # Check attention is always last and order is fine _lowercase : List[Any] = True _lowercase : str = True _lowercase : Tuple = model_class(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() with torch.no_grad(): _lowercase : Dict = model(**self._prepare_for_class(UpperCAmelCase_ ,UpperCAmelCase_ ) ) self.assertEqual(out_len + 1 ,len(UpperCAmelCase_ ) ) _lowercase : List[str] = outputs.attentions self.assertEqual(len(UpperCAmelCase_ ) ,UpperCAmelCase_ ) # verify the first attentions (first block, first layer) _lowercase : Any = (self.model_tester.image_size // 4) ** 2 _lowercase : Union[str, Any] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] ,) def lowerCamelCase__ ( self ): def check_hidden_states_output(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ): _lowercase : int = model_class(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() with torch.no_grad(): _lowercase : Optional[int] = model(**self._prepare_for_class(UpperCAmelCase_ ,UpperCAmelCase_ ) ) _lowercase : Any = outputs.hidden_states _lowercase : Dict = self.model_tester.num_encoder_blocks self.assertEqual(len(UpperCAmelCase_ ) ,UpperCAmelCase_ ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) ,[ self.model_tester.hidden_sizes[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] ,) _lowercase , _lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in 
self.all_model_classes: _lowercase : str = True check_hidden_states_output(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowercase : List[Any] = True check_hidden_states_output(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ): if not self.model_tester.is_training: return _lowercase , _lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() _lowercase : str = True for model_class in self.all_model_classes: if model_class in get_values(UpperCAmelCase_ ): continue _lowercase : int = model_class(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.train() _lowercase : Tuple = self._prepare_for_class(UpperCAmelCase_ ,UpperCAmelCase_ ,return_labels=UpperCAmelCase_ ) _lowercase : Optional[Any] = model(**UpperCAmelCase_ ).loss loss.backward() @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowerCamelCase__ ( self ): pass @slow def lowerCamelCase__ ( self ): for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowercase : Optional[Any] = SegformerModel.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) def __SCREAMING_SNAKE_CASE ( ): _lowercase : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch class UpperCamelCase ( unittest.TestCase ): """simple docstring""" @slow def lowerCamelCase__ ( self ): # only resize + normalize _lowercase : Dict = SegformerImageProcessor( image_scale=(5_12, 5_12) ,keep_ratio=UpperCAmelCase_ ,align=UpperCAmelCase_ ,do_random_crop=UpperCAmelCase_ ) _lowercase : int = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to( UpperCAmelCase_ ) _lowercase : Union[str, Any] = prepare_img() _lowercase : Union[str, Any] = image_processor(images=UpperCAmelCase_ ,return_tensors="""pt""" ) _lowercase : Optional[int] = encoded_inputs.pixel_values.to(UpperCAmelCase_ ) with torch.no_grad(): _lowercase : Any = model(UpperCAmelCase_ ) _lowercase : Dict = torch.Size((1, model.config.num_labels, 1_28, 1_28) ) self.assertEqual(outputs.logits.shape ,UpperCAmelCase_ ) _lowercase : int = torch.tensor( [ [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]], [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]], [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]], ] ).to(UpperCAmelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] ,UpperCAmelCase_ ,atol=1E-4 ) ) @slow def lowerCamelCase__ ( self ): # only resize + normalize _lowercase : str = SegformerImageProcessor( image_scale=(5_12, 5_12) ,keep_ratio=UpperCAmelCase_ ,align=UpperCAmelCase_ ,do_random_crop=UpperCAmelCase_ ) _lowercase : Optional[int] = SegformerForSemanticSegmentation.from_pretrained( """nvidia/segformer-b1-finetuned-cityscapes-1024-1024""" ).to(UpperCAmelCase_ ) _lowercase : Dict = prepare_img() _lowercase : List[str] = image_processor(images=UpperCAmelCase_ ,return_tensors="""pt""" ) _lowercase : Dict = encoded_inputs.pixel_values.to(UpperCAmelCase_ ) with torch.no_grad(): _lowercase : Optional[Any] = model(UpperCAmelCase_ ) _lowercase : int = torch.Size((1, model.config.num_labels, 1_28, 1_28) ) self.assertEqual(outputs.logits.shape ,UpperCAmelCase_ ) _lowercase : Any = torch.tensor( [ [[-13.5748, -13.9111, -12.6500], [-14.3500, 
-15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]], [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]], [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]], ] ).to(UpperCAmelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] ,UpperCAmelCase_ ,atol=1E-1 ) ) @slow def lowerCamelCase__ ( self ): # only resize + normalize _lowercase : List[str] = SegformerImageProcessor( image_scale=(5_12, 5_12) ,keep_ratio=UpperCAmelCase_ ,align=UpperCAmelCase_ ,do_random_crop=UpperCAmelCase_ ) _lowercase : Any = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to( UpperCAmelCase_ ) _lowercase : Optional[int] = prepare_img() _lowercase : List[str] = image_processor(images=UpperCAmelCase_ ,return_tensors="""pt""" ) _lowercase : int = encoded_inputs.pixel_values.to(UpperCAmelCase_ ) with torch.no_grad(): _lowercase : Any = model(UpperCAmelCase_ ) _lowercase : str = outputs.logits.detach().cpu() _lowercase : int = image_processor.post_process_semantic_segmentation(outputs=UpperCAmelCase_ ,target_sizes=[(5_00, 3_00)] ) _lowercase : Tuple = torch.Size((5_00, 3_00) ) self.assertEqual(segmentation[0].shape ,UpperCAmelCase_ ) _lowercase : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=UpperCAmelCase_ ) _lowercase : List[Any] = torch.Size((1_28, 1_28) ) self.assertEqual(segmentation[0].shape ,UpperCAmelCase_ )
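# Hedged sketch of the inference pattern the slow tests above exercise, using
# the public transformers API and the same checkpoint id; the image path is a
# placeholder.
import torch
from PIL import Image
from transformers import SegformerForSemanticSegmentation, SegformerImageProcessor

checkpoint = "nvidia/segformer-b0-finetuned-ade-512-512"
image_processor = SegformerImageProcessor.from_pretrained(checkpoint)
model = SegformerForSemanticSegmentation.from_pretrained(checkpoint)

image = Image.open("example.png")  # placeholder input image
inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)  # logits have shape (1, num_labels, H / 4, W / 4)
# Upsample back to the input resolution and take the per-pixel argmax.
segmentation = image_processor.post_process_semantic_segmentation(
    outputs, target_sizes=[image.size[::-1]]
)[0]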
"""simple docstring""" import os from typing import List, Optional, Union from ...image_processing_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType from ..auto import AutoTokenizer class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = ["image_processor", "tokenizer"] SCREAMING_SNAKE_CASE_ : Optional[int] = "BlipImageProcessor" SCREAMING_SNAKE_CASE_ : Tuple = "AutoTokenizer" def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ): super().__init__(UpperCAmelCase_ ,UpperCAmelCase_ ) # add QFormer tokenizer _lowercase : Optional[int] = qformer_tokenizer def __call__( self ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = True ,UpperCAmelCase_ = False ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = 0 ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,): if images is None and text is None: raise ValueError("""You have to specify at least images or text.""" ) _lowercase : List[Any] = BatchFeature() if text is not None: _lowercase : List[str] = self.tokenizer( text=UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,max_length=UpperCAmelCase_ ,stride=UpperCAmelCase_ ,pad_to_multiple_of=UpperCAmelCase_ ,return_attention_mask=UpperCAmelCase_ ,return_overflowing_tokens=UpperCAmelCase_ ,return_special_tokens_mask=UpperCAmelCase_ ,return_offsets_mapping=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ ,return_length=UpperCAmelCase_ ,verbose=UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ,**UpperCAmelCase_ ,) encoding.update(UpperCAmelCase_ ) _lowercase : Dict = self.qformer_tokenizer( text=UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,max_length=UpperCAmelCase_ ,stride=UpperCAmelCase_ ,pad_to_multiple_of=UpperCAmelCase_ ,return_attention_mask=UpperCAmelCase_ ,return_overflowing_tokens=UpperCAmelCase_ ,return_special_tokens_mask=UpperCAmelCase_ ,return_offsets_mapping=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ ,return_length=UpperCAmelCase_ ,verbose=UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ,**UpperCAmelCase_ ,) _lowercase : str = qformer_text_encoding.pop("""input_ids""" ) _lowercase : int = qformer_text_encoding.pop("""attention_mask""" ) if images is not None: _lowercase : Optional[Any] = self.image_processor(UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ) encoding.update(UpperCAmelCase_ ) return encoding def lowerCamelCase__ ( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): return self.tokenizer.batch_decode(*UpperCAmelCase_ ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): return self.tokenizer.decode(*UpperCAmelCase_ ,**UpperCAmelCase_ ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def lowerCamelCase__ ( self ): _lowercase : Union[str, Any] = self.tokenizer.model_input_names _lowercase : int = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,**UpperCAmelCase_ ): if 
os.path.isfile(UpperCAmelCase_ ): raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" ) os.makedirs(UpperCAmelCase_ ,exist_ok=UpperCAmelCase_ ) _lowercase : Any = os.path.join(UpperCAmelCase_ ,"""qformer_tokenizer""" ) self.qformer_tokenizer.save_pretrained(UpperCAmelCase_ ) return super().save_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ ) @classmethod def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ): _lowercase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCAmelCase_ ,subfolder="""qformer_tokenizer""" ) _lowercase : Any = cls._get_arguments_from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ ) args.append(UpperCAmelCase_ ) return cls(*UpperCAmelCase_ )
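# Hedged usage sketch: in transformers this processor is exported as
# InstructBlipProcessor. One call yields pixel_values plus two sets of text
# tensors (input_ids/attention_mask for the language model,
# qformer_input_ids/qformer_attention_mask for the Q-Former). Checkpoint id
# and image path are illustrative.
from PIL import Image
from transformers import InstructBlipProcessor

processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
inputs = processor(
    images=Image.open("example.jpg"),
    text="What is shown in this image?",
    return_tensors="pt",
)
print(sorted(inputs.keys()))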
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) UpperCAmelCase: Any = logging.getLogger(__name__) @dataclass class UpperCamelCase : """simple docstring""" SCREAMING_SNAKE_CASE_ : str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) SCREAMING_SNAKE_CASE_ : Optional[str] = field( default=snake_case , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) SCREAMING_SNAKE_CASE_ : Optional[str] = field( default=snake_case , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) SCREAMING_SNAKE_CASE_ : Optional[str] = field( default=snake_case , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) SCREAMING_SNAKE_CASE_ : bool = field(default=snake_case , metadata={"help": "Whether tp freeze the encoder."} ) SCREAMING_SNAKE_CASE_ : bool = field(default=snake_case , metadata={"help": "Whether to freeze the embeddings."} ) @dataclass class UpperCamelCase : """simple docstring""" SCREAMING_SNAKE_CASE_ : str = field( metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} ) SCREAMING_SNAKE_CASE_ : Optional[str] = field( default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , ) SCREAMING_SNAKE_CASE_ : Optional[int] = field( default=1_0_2_4 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) SCREAMING_SNAKE_CASE_ : Optional[int] = field( default=1_2_8 , metadata={ "help": ( "The maximum total sequence length for target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) SCREAMING_SNAKE_CASE_ : Optional[int] = field( default=1_4_2 , metadata={ "help": ( "The maximum total sequence length for validation target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded. " "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used " "during ``evaluate`` and ``predict``." ) } , ) SCREAMING_SNAKE_CASE_ : Optional[int] = field( default=1_4_2 , metadata={ "help": ( "The maximum total sequence length for test target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) SCREAMING_SNAKE_CASE_ : Optional[int] = field(default=-1 , metadata={"help": "# training examples. -1 means use all."} ) SCREAMING_SNAKE_CASE_ : Optional[int] = field(default=-1 , metadata={"help": "# validation examples. -1 means use all."} ) SCREAMING_SNAKE_CASE_ : Optional[int] = field(default=-1 , metadata={"help": "# test examples. 
-1 means use all."} ) SCREAMING_SNAKE_CASE_ : Optional[str] = field(default=snake_case , metadata={"help": "Source language id for translation."} ) SCREAMING_SNAKE_CASE_ : Optional[str] = field(default=snake_case , metadata={"help": "Target language id for translation."} ) SCREAMING_SNAKE_CASE_ : Optional[int] = field(default=snake_case , metadata={"help": "# num_beams to use for evaluation."} ) SCREAMING_SNAKE_CASE_ : bool = field( default=snake_case , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): logger.info(F"""***** {split} metrics *****""" ) for key in sorted(metrics.keys() ): logger.info(F""" {key} = {metrics[key]}""" ) save_json(__UpperCAmelCase , os.path.join(__UpperCAmelCase , F"""{split}_results.json""" ) ) def __SCREAMING_SNAKE_CASE ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _lowercase : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _lowercase , _lowercase , _lowercase : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _lowercase , _lowercase , _lowercase : Optional[int] = parser.parse_args_into_dataclasses() check_output_dir(__UpperCAmelCase ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info("""Training/evaluation parameters %s""" , __UpperCAmelCase ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_lowercase : List[str] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _lowercase : Tuple = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""") for p in extra_model_params: if getattr(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): assert hasattr(__UpperCAmelCase , __UpperCAmelCase ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute""" setattr(__UpperCAmelCase , __UpperCAmelCase , getattr(__UpperCAmelCase , __UpperCAmelCase ) ) _lowercase : Optional[int] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _lowercase : Dict = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=__UpperCAmelCase , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(__UpperCAmelCase , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: _lowercase : Dict = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(__UpperCAmelCase , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(__UpperCAmelCase , __UpperCAmelCase ): _lowercase : Optional[Any] = tokenizer.lang_code_to_id[data_args.tgt_lang] else: _lowercase : Dict = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(__UpperCAmelCase ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) _lowercase : Optional[int] = SeqaSeqDataset # Get datasets _lowercase : int = ( dataset_class( __UpperCAmelCase , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_train else None ) _lowercase : int = ( dataset_class( __UpperCAmelCase , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) _lowercase : Tuple = ( dataset_class( __UpperCAmelCase , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_predict else None ) # Initialize our Trainer _lowercase : List[Any] = ( build_compute_metrics_fn(data_args.task , __UpperCAmelCase ) if training_args.predict_with_generate else None ) _lowercase : Tuple = SeqaSeqTrainer( model=__UpperCAmelCase , args=__UpperCAmelCase , data_args=__UpperCAmelCase , train_dataset=__UpperCAmelCase , eval_dataset=__UpperCAmelCase , data_collator=SeqaSeqDataCollator( __UpperCAmelCase , __UpperCAmelCase , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=__UpperCAmelCase , tokenizer=__UpperCAmelCase , ) _lowercase : int = {} # Training if training_args.do_train: logger.info("""*** Train ***""" ) 
_lowercase : Any = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) _lowercase : Optional[int] = train_result.metrics _lowercase : Tuple = data_args.n_train trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics("""train""" , __UpperCAmelCase , training_args.output_dir ) all_metrics.update(__UpperCAmelCase ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info("""*** Evaluate ***""" ) _lowercase : List[Any] = trainer.evaluate(metric_key_prefix="""val""" ) _lowercase : Any = data_args.n_val _lowercase : Any = round(metrics["""val_loss"""] , 4 ) if trainer.is_world_process_zero(): handle_metrics("""val""" , __UpperCAmelCase , training_args.output_dir ) all_metrics.update(__UpperCAmelCase ) if training_args.do_predict: logger.info("""*** Predict ***""" ) _lowercase : Any = trainer.predict(test_dataset=__UpperCAmelCase , metric_key_prefix="""test""" ) _lowercase : int = test_output.metrics _lowercase : Optional[int] = data_args.n_test if trainer.is_world_process_zero(): _lowercase : Union[str, Any] = round(metrics["""test_loss"""] , 4 ) handle_metrics("""test""" , __UpperCAmelCase , training_args.output_dir ) all_metrics.update(__UpperCAmelCase ) if training_args.predict_with_generate: _lowercase : Any = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase ) _lowercase : Any = lmap(str.strip , __UpperCAmelCase ) write_txt_file(__UpperCAmelCase , os.path.join(training_args.output_dir , """test_generations.txt""" ) ) if trainer.is_world_process_zero(): save_json(__UpperCAmelCase , os.path.join(training_args.output_dir , """all_results.json""" ) ) return all_metrics def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
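# Hedged sketch: HfArgumentParser also accepts a single JSON file whose keys
# mirror the dataclass fields above; the checkpoint and values below are
# illustrative only.
import json

train_args = {
    "model_name_or_path": "sshleifer/distilbart-xsum-12-3",
    "data_dir": "xsum",
    "output_dir": "out",
    "do_train": True,
    "do_eval": True,
    "predict_with_generate": True,
}
with open("train_args.json", "w") as f:
    json.dump(train_args, f, indent=2)
# Then launch with: python finetune_trainer.py train_args.json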
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging UpperCAmelCase: Tuple = logging.get_logger(__name__) UpperCAmelCase: List[Any] = { """CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": ( """https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json""" ), # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer } class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = "trajectory_transformer" SCREAMING_SNAKE_CASE_ : Optional[int] = ["past_key_values"] SCREAMING_SNAKE_CASE_ : Tuple = { "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self ,UpperCAmelCase_=1_00 ,UpperCAmelCase_=5 ,UpperCAmelCase_=1 ,UpperCAmelCase_=1 ,UpperCAmelCase_=2_49 ,UpperCAmelCase_=6 ,UpperCAmelCase_=17 ,UpperCAmelCase_=25 ,UpperCAmelCase_=4 ,UpperCAmelCase_=4 ,UpperCAmelCase_=1_28 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.0006 ,UpperCAmelCase_=5_12 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=1E-12 ,UpperCAmelCase_=1 ,UpperCAmelCase_=True ,UpperCAmelCase_=1 ,UpperCAmelCase_=5_02_56 ,UpperCAmelCase_=5_02_56 ,**UpperCAmelCase_ ,): _lowercase : Dict = vocab_size _lowercase : List[str] = action_weight _lowercase : int = reward_weight _lowercase : List[Any] = value_weight _lowercase : List[str] = max_position_embeddings _lowercase : Any = block_size _lowercase : Any = action_dim _lowercase : List[str] = observation_dim _lowercase : Union[str, Any] = transition_dim _lowercase : str = learning_rate _lowercase : Tuple = n_layer _lowercase : Optional[int] = n_head _lowercase : List[str] = n_embd _lowercase : List[str] = embd_pdrop _lowercase : Optional[Any] = attn_pdrop _lowercase : List[Any] = resid_pdrop _lowercase : str = initializer_range _lowercase : Optional[Any] = layer_norm_eps _lowercase : List[Any] = kaiming_initializer_range _lowercase : List[Any] = use_cache super().__init__(pad_token_id=UpperCAmelCase_ ,bos_token_id=UpperCAmelCase_ ,eos_token_id=UpperCAmelCase_ ,**UpperCAmelCase_ )
"""simple docstring""" from __future__ import annotations def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase : Dict = len(__UpperCAmelCase ) # We need to create solution object to save path. _lowercase : int = [[0 for _ in range(__UpperCAmelCase )] for _ in range(__UpperCAmelCase )] _lowercase : Optional[Any] = run_maze(__UpperCAmelCase , 0 , 0 , __UpperCAmelCase ) if solved: print("""\n""".join(str(__UpperCAmelCase ) for row in solutions ) ) else: print("""No solution exists!""" ) return solved def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): _lowercase : List[Any] = len(__UpperCAmelCase ) # Final check point. if i == j == (size - 1): _lowercase : Any = 1 return True _lowercase : Optional[int] = (not i < 0) and (not j < 0) # Check lower bounds _lowercase : Union[str, Any] = (i < size) and (j < size) # Check upper bounds if lower_flag and upper_flag: # check for already visited and block points. _lowercase : str = (not solutions[i][j]) and (not maze[i][j]) if block_flag: # check visited _lowercase : Optional[int] = 1 # check for directions if ( run_maze(__UpperCAmelCase , i + 1 , __UpperCAmelCase , __UpperCAmelCase ) or run_maze(__UpperCAmelCase , __UpperCAmelCase , j + 1 , __UpperCAmelCase ) or run_maze(__UpperCAmelCase , i - 1 , __UpperCAmelCase , __UpperCAmelCase ) or run_maze(__UpperCAmelCase , __UpperCAmelCase , j - 1 , __UpperCAmelCase ) ): return True _lowercase : Optional[Any] = 0 return False return False if __name__ == "__main__": import doctest doctest.testmod()
336
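The maze file above is hard to follow with its rewritten identifiers, so here is a de-obfuscated sketch of the same depth-first backtracking algorithm (names are illustrative, not the originals):

def solve_maze(maze):
    # 0 = open cell, 1 = blocked cell; returns the path grid or None.
    n = len(maze)
    path = [[0] * n for _ in range(n)]

    def walk(i, j):
        if i == j == n - 1:  # reached the bottom-right goal cell
            path[i][j] = 1
            return True
        if 0 <= i < n and 0 <= j < n and not path[i][j] and not maze[i][j]:
            path[i][j] = 1  # tentatively mark this cell as on the path
            if walk(i + 1, j) or walk(i, j + 1) or walk(i - 1, j) or walk(i, j - 1):
                return True
            path[i][j] = 0  # dead end: backtrack
        return False

    return path if walk(0, 0) else None


print(solve_maze([[0, 1], [0, 0]]))  # [[1, 0], [1, 1]]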
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING UpperCAmelCase: Any = logging.get_logger(__name__) UpperCAmelCase: List[str] = { """Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""", } class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = "instructblip_vision_model" def __init__( self ,UpperCAmelCase_=14_08 ,UpperCAmelCase_=61_44 ,UpperCAmelCase_=39 ,UpperCAmelCase_=16 ,UpperCAmelCase_=2_24 ,UpperCAmelCase_=14 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=1E-6 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=1E-10 ,UpperCAmelCase_=True ,**UpperCAmelCase_ ,): super().__init__(**UpperCAmelCase_ ) _lowercase : Optional[Any] = hidden_size _lowercase : Tuple = intermediate_size _lowercase : List[Any] = num_hidden_layers _lowercase : Tuple = num_attention_heads _lowercase : Optional[Any] = patch_size _lowercase : Optional[Any] = image_size _lowercase : Union[str, Any] = initializer_range _lowercase : Optional[Any] = attention_dropout _lowercase : List[Any] = layer_norm_eps _lowercase : Optional[int] = hidden_act _lowercase : Tuple = qkv_bias @classmethod def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ): cls._set_token_in_kwargs(UpperCAmelCase_ ) _lowercase , _lowercase : List[Any] = cls.get_config_dict(UpperCAmelCase_ ,**UpperCAmelCase_ ) # get the vision config dict if we are loading from InstructBlipConfig if config_dict.get("""model_type""" ) == "instructblip": _lowercase : int = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(UpperCAmelCase_ ,**UpperCAmelCase_ ) class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = "instructblip_qformer" def __init__( self ,UpperCAmelCase_=3_05_22 ,UpperCAmelCase_=7_68 ,UpperCAmelCase_=12 ,UpperCAmelCase_=12 ,UpperCAmelCase_=30_72 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=5_12 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=1E-12 ,UpperCAmelCase_=0 ,UpperCAmelCase_="absolute" ,UpperCAmelCase_=2 ,UpperCAmelCase_=14_08 ,**UpperCAmelCase_ ,): super().__init__(pad_token_id=UpperCAmelCase_ ,**UpperCAmelCase_ ) _lowercase : List[Any] = vocab_size _lowercase : List[Any] = hidden_size _lowercase : str = num_hidden_layers _lowercase : List[str] = num_attention_heads _lowercase : Optional[Any] = hidden_act _lowercase : int = intermediate_size _lowercase : Union[str, Any] = hidden_dropout_prob _lowercase : Optional[Any] = attention_probs_dropout_prob _lowercase : List[Any] = max_position_embeddings _lowercase : Tuple = initializer_range _lowercase : Optional[int] = layer_norm_eps _lowercase : Any = position_embedding_type _lowercase : Dict = cross_attention_frequency _lowercase : Optional[Any] = encoder_hidden_size @classmethod def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ): cls._set_token_in_kwargs(UpperCAmelCase_ ) _lowercase , _lowercase : Dict = cls.get_config_dict(UpperCAmelCase_ ,**UpperCAmelCase_ ) # get the qformer config dict if we are loading from InstructBlipConfig if config_dict.get("""model_type""" ) == "instructblip": _lowercase : str = config_dict["""qformer_config"""] if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(UpperCAmelCase_ ,**UpperCAmelCase_ ) class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = "instructblip" SCREAMING_SNAKE_CASE_ : List[str] = True def __init__( self ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=32 ,**UpperCAmelCase_ ): super().__init__(**UpperCAmelCase_ ) if vision_config is None: _lowercase : str = {} logger.info("""vision_config is None. initializing the InstructBlipVisionConfig with default values.""" ) if qformer_config is None: _lowercase : Any = {} logger.info("""qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.""" ) if text_config is None: _lowercase : Optional[int] = {} logger.info("""text_config is None. 
Initializing the text config with default values (`OPTConfig`).""" ) _lowercase : int = InstructBlipVisionConfig(**UpperCAmelCase_ ) _lowercase : Optional[int] = InstructBlipQFormerConfig(**UpperCAmelCase_ ) _lowercase : Dict = text_config["""model_type"""] if """model_type""" in text_config else """opt""" _lowercase : str = CONFIG_MAPPING[text_model_type](**UpperCAmelCase_ ) _lowercase : str = self.text_config.tie_word_embeddings _lowercase : Union[str, Any] = self.text_config.is_encoder_decoder _lowercase : List[str] = num_query_tokens _lowercase : List[str] = self.vision_config.hidden_size _lowercase : Dict = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES _lowercase : Union[str, Any] = 1.0 _lowercase : Dict = 0.02 @classmethod def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,**UpperCAmelCase_ ,): return cls( vision_config=vision_config.to_dict() ,qformer_config=qformer_config.to_dict() ,text_config=text_config.to_dict() ,**UpperCAmelCase_ ,) def lowerCamelCase__ ( self ): _lowercase : Union[str, Any] = copy.deepcopy(self.__dict__ ) _lowercase : int = self.vision_config.to_dict() _lowercase : Any = self.qformer_config.to_dict() _lowercase : Any = self.text_config.to_dict() _lowercase : Optional[int] = self.__class__.model_type return output
336
1
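The InstructBLIP config above follows the composite-config pattern: a top-level config owns one sub-config per component and serializes them recursively through `to_dict`. A minimal sketch of that pattern, with made-up class names:

import copy


class VisionPart:  # hypothetical stand-in for InstructBlipVisionConfig
    def __init__(self, hidden_size=1408):
        self.hidden_size = hidden_size

    def to_dict(self):
        return copy.deepcopy(self.__dict__)


class Composite:  # hypothetical stand-in for InstructBlipConfig
    def __init__(self, vision_config=None):
        # a missing sub-config falls back to defaults, as the class above logs
        self.vision_config = VisionPart(**(vision_config or {}))

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        return output


print(Composite().to_dict())  # {'vision_config': {'hidden_size': 1408}}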
"""simple docstring""" def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): if length <= 0 or not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise ValueError("""Length must be a positive integer.""" ) return [n * (2 * n - 1) for n in range(__UpperCAmelCase )] if __name__ == "__main__": print(hexagonal_numbers(length=5)) print(hexagonal_numbers(length=10))
336
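As a quick sanity check on the closed form h(n) = n * (2n - 1) used above (the generator starts at n = 0, so the sequence begins with 0 rather than the classical 1):

assert [n * (2 * n - 1) for n in range(5)] == [0, 1, 6, 15, 28]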
"""simple docstring""" import cva import numpy as np class UpperCamelCase : """simple docstring""" def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ): if k in (0.04, 0.06): _lowercase : Optional[Any] = k _lowercase : Optional[Any] = window_size else: raise ValueError("""invalid k value""" ) def __str__( self ): return str(self.k ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ): _lowercase : List[str] = cva.imread(UpperCAmelCase_ ,0 ) _lowercase , _lowercase : Dict = img.shape _lowercase : list[list[int]] = [] _lowercase : int = img.copy() _lowercase : List[str] = cva.cvtColor(UpperCAmelCase_ ,cva.COLOR_GRAY2RGB ) _lowercase , _lowercase : Optional[Any] = np.gradient(UpperCAmelCase_ ) _lowercase : Optional[int] = dx**2 _lowercase : Optional[Any] = dy**2 _lowercase : Optional[Any] = dx * dy _lowercase : List[str] = 0.04 _lowercase : Optional[Any] = self.window_size // 2 for y in range(UpperCAmelCase_ ,h - offset ): for x in range(UpperCAmelCase_ ,w - offset ): _lowercase : Optional[Any] = ixx[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() _lowercase : Dict = iyy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() _lowercase : Union[str, Any] = ixy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() _lowercase : int = (wxx * wyy) - (wxy**2) _lowercase : Union[str, Any] = wxx + wyy _lowercase : Union[str, Any] = det - k * (trace**2) # Can change the value if r > 0.5: corner_list.append([x, y, r] ) color_img.itemset((y, x, 0) ,0 ) color_img.itemset((y, x, 1) ,0 ) color_img.itemset((y, x, 2) ,2_55 ) return color_img, corner_list if __name__ == "__main__": UpperCAmelCase: Optional[int] = HarrisCorner(0.04, 3) UpperCAmelCase , UpperCAmelCase: List[Any] = edge_detect.detect("""path_to_image""") cva.imwrite("""detect.png""", color_img)
336
1
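For reference, here is a readable sketch of the Harris detection loop above: build the structure tensor from the image gradients over a sliding window, then score each pixel with R = det(M) - k * trace(M)^2. Identifiers here are illustrative, and the 0.5 corner threshold from the code above is left to the caller:

import numpy as np


def harris_response(gray, k=0.04, window_size=3):
    dy, dx = np.gradient(gray.astype(float))
    ixx, iyy, ixy = dx * dx, dy * dy, dx * dy
    offset = window_size // 2
    h, w = gray.shape
    response = np.zeros((h, w))
    for y in range(offset, h - offset):
        for x in range(offset, w - offset):
            wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
            wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
            wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
            response[y, x] = (wxx * wyy - wxy**2) - k * (wxx + wyy) ** 2
    return response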
"""simple docstring""" from __future__ import annotations import os from typing import Any import requests UpperCAmelCase: Optional[Any] = """https://api.github.com""" # https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user UpperCAmelCase: Union[str, Any] = BASE_URL + """/user""" # https://github.com/settings/tokens UpperCAmelCase: List[str] = os.environ.get("""USER_TOKEN""", """""") def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase : List[Any] = { """Authorization""": F"""token {auth_token}""", """Accept""": """application/vnd.github.v3+json""", } return requests.get(__UpperCAmelCase , headers=__UpperCAmelCase ).json() if __name__ == "__main__": # pragma: no cover if USER_TOKEN: for key, value in fetch_github_info(USER_TOKEN).items(): print(F'{key}: {value}') else: raise ValueError("""'USER_TOKEN' field cannot be empty.""")
336
"""simple docstring""" import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCamelCase ( snake_case , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = LEDTokenizer SCREAMING_SNAKE_CASE_ : List[str] = LEDTokenizerFast SCREAMING_SNAKE_CASE_ : List[str] = True def lowerCamelCase__ ( self ): super().setUp() _lowercase : Union[str, Any] = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] _lowercase : List[Any] = dict(zip(UpperCAmelCase_ ,range(len(UpperCAmelCase_ ) ) ) ) _lowercase : Optional[int] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] _lowercase : Dict = {"""unk_token""": """<unk>"""} _lowercase : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] ) _lowercase : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write(json.dumps(UpperCAmelCase_ ) + """\n""" ) with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write("""\n""".join(UpperCAmelCase_ ) ) def lowerCamelCase__ ( self ,**UpperCAmelCase_ ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,**UpperCAmelCase_ ): kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ): return "lower newer", "lower newer" @cached_property def lowerCamelCase__ ( self ): return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" ) @cached_property def lowerCamelCase__ ( self ): return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" ) @require_torch def lowerCamelCase__ ( self ): _lowercase : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] _lowercase : Any = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowercase : Tuple = tokenizer(UpperCAmelCase_ ,max_length=len(UpperCAmelCase_ ) ,padding=UpperCAmelCase_ ,return_tensors="""pt""" ) self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ ) self.assertEqual((2, 9) ,batch.input_ids.shape ) self.assertEqual((2, 9) ,batch.attention_mask.shape ) _lowercase : Optional[Any] = batch.input_ids.tolist()[0] self.assertListEqual(UpperCAmelCase_ ,UpperCAmelCase_ ) @require_torch def lowerCamelCase__ ( self ): _lowercase : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowercase : Dict = tokenizer(UpperCAmelCase_ ,padding=UpperCAmelCase_ ,return_tensors="""pt""" ) self.assertIn("""input_ids""" ,UpperCAmelCase_ ) self.assertIn("""attention_mask""" ,UpperCAmelCase_ ) self.assertNotIn("""labels""" ,UpperCAmelCase_ ) self.assertNotIn("""decoder_attention_mask""" ,UpperCAmelCase_ ) 
@require_torch def lowerCamelCase__ ( self ): _lowercase : Dict = [ """Summary of the text.""", """Another summary.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowercase : Tuple = tokenizer(text_target=UpperCAmelCase_ ,max_length=32 ,padding="""max_length""" ,return_tensors="""pt""" ) self.assertEqual(32 ,targets["""input_ids"""].shape[1] ) @require_torch def lowerCamelCase__ ( self ): for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowercase : List[Any] = tokenizer( ["""I am a small frog""" * 10_24, """I am a small frog"""] ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,return_tensors="""pt""" ) self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ ) self.assertEqual(batch.input_ids.shape ,(2, 51_22) ) @require_torch def lowerCamelCase__ ( self ): _lowercase : List[Any] = ["""A long paragraph for summarization."""] _lowercase : Dict = [ """Summary of the text.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowercase : Dict = tokenizer(UpperCAmelCase_ ,return_tensors="""pt""" ) _lowercase : List[str] = tokenizer(text_target=UpperCAmelCase_ ,return_tensors="""pt""" ) _lowercase : Union[str, Any] = inputs["""input_ids"""] _lowercase : List[str] = targets["""input_ids"""] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def lowerCamelCase__ ( self ): for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowercase : str = ["""Summary of the text.""", """Another summary."""] _lowercase : Optional[int] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] _lowercase : Any = tokenizer(UpperCAmelCase_ ,padding=UpperCAmelCase_ ) _lowercase : str = [[0] * len(UpperCAmelCase_ ) for x in encoded_output["""input_ids"""]] _lowercase : Optional[int] = tokenizer.pad(UpperCAmelCase_ ) self.assertSequenceEqual(outputs["""global_attention_mask"""] ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ): pass def lowerCamelCase__ ( self ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): _lowercase : int = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ ) _lowercase : Optional[int] = self.tokenizer_class.from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ ) _lowercase : Dict = """A, <mask> AllenNLP sentence.""" _lowercase : List[Any] = tokenizer_r.encode_plus(UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ ) _lowercase : Any = tokenizer_p.encode_plus(UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ ) self.assertEqual(sum(tokens_r["""token_type_ids"""] ) ,sum(tokens_p["""token_type_ids"""] ) ) self.assertEqual( sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) ,sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) ,) _lowercase : str = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) _lowercase : str = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) self.assertSequenceEqual(tokens_p["""input_ids"""] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) 
self.assertSequenceEqual( UpperCAmelCase_ ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( UpperCAmelCase_ ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
336
1
"""simple docstring""" from __future__ import annotations def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ): _lowercase : Optional[Any] = 0 _lowercase : Optional[int] = len(__UpperCAmelCase ) - 1 while i < j: if nums[i] + nums[j] == target: return [i, j] elif nums[i] + nums[j] < target: _lowercase : Dict = i + 1 else: _lowercase : Optional[int] = j - 1 return [] if __name__ == "__main__": import doctest doctest.testmod() print(F'{two_pointer([2, 7, 11, 15], 9) = }')
336
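A short trace showing why the pointer moves above are sound on sorted input:

# two_pointer([2, 7, 11, 15], 18):
#   i=0, j=3: 2 + 15 = 17 < 18  -> move i right
#   i=1, j=3: 7 + 15 = 22 > 18  -> move j left
#   i=1, j=2: 7 + 11 = 18       -> found, return [1, 2]
assert two_pointer([2, 7, 11, 15], 18) == [1, 2]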
"""simple docstring""" import argparse from collections import defaultdict def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): _lowercase : str = F"""{file}_{class_name}_{test_name}""" done_test[_id] += 1 with open(__UpperCAmelCase , """r""" ) as f: _lowercase : Any = f.readlines() _lowercase : Optional[int] = F"""class {class_name}(""" _lowercase : List[str] = F"""{4 * " "}def {test_name}(""" _lowercase : List[Any] = F"""{8 * " "}{correct_line.split()[0]}""" _lowercase : int = F"""{16 * " "}{correct_line.split()[0]}""" _lowercase : str = False _lowercase : Optional[Any] = False _lowercase : Union[str, Any] = False _lowercase : Any = False _lowercase : int = 0 _lowercase : Tuple = 0 _lowercase : Union[str, Any] = [] for line in lines: if line.startswith(__UpperCAmelCase ): _lowercase : List[str] = True elif in_class and line.startswith(__UpperCAmelCase ): _lowercase : str = True elif in_class and in_func and (line.startswith(__UpperCAmelCase ) or line.startswith(__UpperCAmelCase )): _lowercase : Union[str, Any] = len(line.split(correct_line.split()[0] )[0] ) count += 1 if count == done_test[_id]: _lowercase : Optional[int] = True if in_class and in_func and in_line: if ")" not in line: continue else: _lowercase : Optional[Any] = True if in_class and in_func and in_line and insert_line: new_lines.append(F"""{spaces * " "}{correct_line}""" ) _lowercase : Union[str, Any] = False else: new_lines.append(__UpperCAmelCase ) with open(__UpperCAmelCase , """w""" ) as f: for line in new_lines: f.write(__UpperCAmelCase ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase=None ): if fail is not None: with open(__UpperCAmelCase , """r""" ) as f: _lowercase : Dict = {l.strip() for l in f.readlines()} else: _lowercase : int = None with open(__UpperCAmelCase , """r""" ) as f: _lowercase : int = f.readlines() _lowercase : int = defaultdict(__UpperCAmelCase ) for line in correct_lines: _lowercase , _lowercase , _lowercase , _lowercase : int = line.split(""";""" ) if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures: overwrite_file(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) if __name__ == "__main__": UpperCAmelCase: List[Any] = argparse.ArgumentParser() parser.add_argument("""--correct_filename""", help="""filename of tests with expected result""") parser.add_argument("""--fail_filename""", help="""filename of test failures""", type=str, default=None) UpperCAmelCase: Any = parser.parse_args() main(args.correct_filename, args.fail_filename)
336
1
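The patch script above keys each expected-value record on file, class, and test name, counting repeats so that the n-th record for a test patches the n-th matching line in the source file. A small sketch of that bookkeeping, with hypothetical records:

from collections import defaultdict

done_test = defaultdict(int)
records = [
    "tests/test_x.py;XTest;test_a;expected = 1",  # hypothetical record
    "tests/test_x.py;XTest;test_a;expected = 2",  # second hit for the same test
]
for record in records:
    file, class_name, test_name, correct_line = record.split(";")
    done_test[f"{file}_{class_name}_{test_name}"] += 1

print(dict(done_test))  # {'tests/test_x.py_XTest_test_a': 2}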
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase: Tuple = logging.get_logger(__name__) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase : Optional[int] = DPTConfig() if "large" in checkpoint_url: _lowercase : str = 1024 _lowercase : List[Any] = 4096 _lowercase : Union[str, Any] = 24 _lowercase : Tuple = 16 _lowercase : List[Any] = [5, 11, 17, 23] _lowercase : Union[str, Any] = [256, 512, 1024, 1024] _lowercase : Union[str, Any] = (1, 384, 384) if "ade" in checkpoint_url: _lowercase : Optional[int] = True _lowercase : Union[str, Any] = 150 _lowercase : List[Any] = """huggingface/label-files""" _lowercase : Tuple = """ade20k-id2label.json""" _lowercase : Tuple = json.load(open(cached_download(hf_hub_url(__UpperCAmelCase , __UpperCAmelCase , repo_type="""dataset""" ) ) , """r""" ) ) _lowercase : Tuple = {int(__UpperCAmelCase ): v for k, v in idalabel.items()} _lowercase : str = idalabel _lowercase : Any = {v: k for k, v in idalabel.items()} _lowercase : Union[str, Any] = [1, 150, 480, 480] return config, expected_shape def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase : Tuple = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""] for k in ignore_keys: state_dict.pop(__UpperCAmelCase , __UpperCAmelCase ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): _lowercase : Optional[Any] = name.replace("""pretrained.model""" , """dpt.encoder""" ) if "pretrained.model" in name: _lowercase : List[Any] = name.replace("""pretrained.model""" , """dpt.embeddings""" ) if "patch_embed" in name: _lowercase : str = name.replace("""patch_embed""" , """patch_embeddings""" ) if "pos_embed" in name: _lowercase : Optional[int] = name.replace("""pos_embed""" , """position_embeddings""" ) if "attn.proj" in name: _lowercase : Any = name.replace("""attn.proj""" , """attention.output.dense""" ) if "proj" in name and "project" not in name: _lowercase : Dict = name.replace("""proj""" , """projection""" ) if "blocks" in name: _lowercase : Union[str, Any] = name.replace("""blocks""" , """layer""" ) if "mlp.fc1" in name: _lowercase : Any = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: _lowercase : int = name.replace("""mlp.fc2""" , """output.dense""" ) if "norm1" in name: _lowercase : List[Any] = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: _lowercase : Optional[Any] = name.replace("""norm2""" , """layernorm_after""" ) if "scratch.output_conv" in name: _lowercase : Union[str, Any] = name.replace("""scratch.output_conv""" , """head""" ) if "scratch" in name: _lowercase : Union[str, Any] = name.replace("""scratch""" , """neck""" ) if "layer1_rn" in name: _lowercase : str = name.replace("""layer1_rn""" , """convs.0""" ) if "layer2_rn" in name: _lowercase : Tuple = name.replace("""layer2_rn""" , """convs.1""" ) if "layer3_rn" in name: _lowercase : str = name.replace("""layer3_rn""" , """convs.2""" ) if "layer4_rn" in name: _lowercase : List[str] = name.replace("""layer4_rn""" , """convs.3""" ) if "refinenet" in name: _lowercase : Union[str, Any] = int(name[len("""neck.refinenet""" ) : 
len("""neck.refinenet""" ) + 1] ) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 _lowercase : int = name.replace(F"""refinenet{layer_idx}""" , F"""fusion_stage.layers.{abs(layer_idx-4 )}""" ) if "out_conv" in name: _lowercase : int = name.replace("""out_conv""" , """projection""" ) if "resConfUnit1" in name: _lowercase : List[str] = name.replace("""resConfUnit1""" , """residual_layer1""" ) if "resConfUnit2" in name: _lowercase : Dict = name.replace("""resConfUnit2""" , """residual_layer2""" ) if "conv1" in name: _lowercase : Dict = name.replace("""conv1""" , """convolution1""" ) if "conv2" in name: _lowercase : Any = name.replace("""conv2""" , """convolution2""" ) # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: _lowercase : Dict = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" ) if "pretrained.act_postprocess2.0.project.0" in name: _lowercase : Tuple = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" ) if "pretrained.act_postprocess3.0.project.0" in name: _lowercase : List[str] = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" ) if "pretrained.act_postprocess4.0.project.0" in name: _lowercase : str = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" ) # resize blocks if "pretrained.act_postprocess1.3" in name: _lowercase : List[str] = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" ) if "pretrained.act_postprocess1.4" in name: _lowercase : List[Any] = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" ) if "pretrained.act_postprocess2.3" in name: _lowercase : Any = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" ) if "pretrained.act_postprocess2.4" in name: _lowercase : str = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" ) if "pretrained.act_postprocess3.3" in name: _lowercase : List[Any] = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" ) if "pretrained.act_postprocess4.3" in name: _lowercase : List[Any] = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" ) if "pretrained.act_postprocess4.4" in name: _lowercase : List[str] = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" ) if "pretrained" in name: _lowercase : Optional[int] = name.replace("""pretrained""" , """dpt""" ) if "bn" in name: _lowercase : str = name.replace("""bn""" , """batch_norm""" ) if "head" in name: _lowercase : str = name.replace("""head""" , """head.head""" ) if "encoder.norm" in name: _lowercase : Tuple = name.replace("""encoder.norm""" , """layernorm""" ) if "auxlayer" in name: _lowercase : str = name.replace("""auxlayer""" , """auxiliary_head.head""" ) return name def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ): for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _lowercase : Dict = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.weight""" ) _lowercase : str = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict _lowercase 
: List[str] = in_proj_weight[: config.hidden_size, :] _lowercase : str = in_proj_bias[: config.hidden_size] _lowercase : List[str] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _lowercase : Optional[int] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _lowercase : Union[str, Any] = in_proj_weight[ -config.hidden_size :, : ] _lowercase : Union[str, Any] = in_proj_bias[-config.hidden_size :] def __SCREAMING_SNAKE_CASE ( ): _lowercase : int = """http://images.cocodataset.org/val2017/000000039769.jpg""" _lowercase : str = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw ) return im @torch.no_grad() def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): _lowercase , _lowercase : List[Any] = get_dpt_config(__UpperCAmelCase ) # load original state_dict from URL _lowercase : List[Any] = torch.hub.load_state_dict_from_url(__UpperCAmelCase , map_location="""cpu""" ) # remove certain keys remove_ignore_keys_(__UpperCAmelCase ) # rename keys for key in state_dict.copy().keys(): _lowercase : int = state_dict.pop(__UpperCAmelCase ) _lowercase : List[Any] = val # read in qkv matrices read_in_q_k_v(__UpperCAmelCase , __UpperCAmelCase ) # load HuggingFace model _lowercase : int = DPTForSemanticSegmentation(__UpperCAmelCase ) if """ade""" in checkpoint_url else DPTForDepthEstimation(__UpperCAmelCase ) model.load_state_dict(__UpperCAmelCase ) model.eval() # Check outputs on an image _lowercase : Union[str, Any] = 480 if """ade""" in checkpoint_url else 384 _lowercase : List[str] = DPTImageProcessor(size=__UpperCAmelCase ) _lowercase : Union[str, Any] = prepare_img() _lowercase : int = image_processor(__UpperCAmelCase , return_tensors="""pt""" ) # forward pass _lowercase : List[str] = model(**__UpperCAmelCase ).logits if """ade""" in checkpoint_url else model(**__UpperCAmelCase ).predicted_depth # Assert logits _lowercase : Tuple = torch.tensor([[6.3_1_9_9, 6.3_6_2_9, 6.4_1_4_8], [6.3_8_5_0, 6.3_6_1_5, 6.4_1_6_6], [6.3_5_1_9, 6.3_1_7_6, 6.3_5_7_5]] ) if "ade" in checkpoint_url: _lowercase : int = torch.tensor([[4.0_4_8_0, 4.2_4_2_0, 4.4_3_6_0], [4.3_1_2_4, 4.5_6_9_3, 4.8_2_6_1], [4.5_7_6_8, 4.8_9_6_5, 5.2_1_6_3]] ) assert outputs.shape == torch.Size(__UpperCAmelCase ) assert ( torch.allclose(outputs[0, 0, :3, :3] , __UpperCAmelCase , atol=1E-4 ) if "ade" in checkpoint_url else torch.allclose(outputs[0, :3, :3] , __UpperCAmelCase ) ) Path(__UpperCAmelCase ).mkdir(exist_ok=__UpperCAmelCase ) print(F"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(__UpperCAmelCase ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(__UpperCAmelCase ) if push_to_hub: print("""Pushing model to hub...""" ) model.push_to_hub( repo_path_or_name=Path(__UpperCAmelCase , __UpperCAmelCase ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=__UpperCAmelCase , ) image_processor.push_to_hub( repo_path_or_name=Path(__UpperCAmelCase , __UpperCAmelCase ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=__UpperCAmelCase , ) if __name__ == "__main__": UpperCAmelCase: Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint_url""", default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""", type=str, help="""URL of the original DPT checkpoint you'd like to convert.""", ) parser.add_argument( 
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", action="""store_true""", ) parser.add_argument( """--model_name""", default="""dpt-large""", type=str, help="""Name of the model, in case you're pushing to the hub.""", ) UpperCAmelCase: Tuple = parser.parse_args() convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
336
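The `read_in_q_k_v` helper above slices timm's fused attention projection into separate query, key, and value weights for the HF layout. A minimal shape-level sketch of that split (the hidden size here is illustrative):

import torch

hidden = 8
fused_weight = torch.randn(3 * hidden, hidden)  # timm stores q, k, v stacked
fused_bias = torch.randn(3 * hidden)

q_w = fused_weight[:hidden, :]
k_w = fused_weight[hidden : 2 * hidden, :]
v_w = fused_weight[-hidden:, :]
q_b, k_b, v_b = fused_bias[:hidden], fused_bias[hidden : 2 * hidden], fused_bias[-hidden:]

assert q_w.shape == k_w.shape == v_w.shape == (hidden, hidden)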
"""simple docstring""" UpperCAmelCase: List[str] = """0.21.0""" from .accelerator import Accelerator from .big_modeling import ( cpu_offload, cpu_offload_with_hook, disk_offload, dispatch_model, init_empty_weights, init_on_device, load_checkpoint_and_dispatch, ) from .data_loader import skip_first_batches from .launchers import debug_launcher, notebook_launcher from .state import PartialState from .utils import ( DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, FullyShardedDataParallelPlugin, GradScalerKwargs, InitProcessGroupKwargs, find_executable_batch_size, infer_auto_device_map, is_rich_available, load_checkpoint_in_model, synchronize_rng_states, ) if is_rich_available(): from .utils import rich
336
1
"""simple docstring""" def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase : Tuple = abs(__UpperCAmelCase ) _lowercase : int = 0 while n > 0: res += n % 10 n //= 10 return res def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase : List[str] = abs(__UpperCAmelCase ) return n if n < 10 else n % 10 + sum_of_digits(n // 10 ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): return sum(int(__UpperCAmelCase ) for c in str(abs(__UpperCAmelCase ) ) ) def __SCREAMING_SNAKE_CASE ( ): from collections.abc import Callable from timeit import timeit def benchmark_a_function(__UpperCAmelCase , __UpperCAmelCase ) -> None: _lowercase : Any = F"""{func.__name__}({value})""" _lowercase : List[str] = timeit(F"""__main__.{call}""" , setup="""import __main__""" ) print(F"""{call:56} = {func(__UpperCAmelCase )} -- {timing:.4f} seconds""" ) for value in (262144, 1125899906842624, 1267650600228229401496703205376): for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact): benchmark_a_function(__UpperCAmelCase , __UpperCAmelCase ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
336
"""simple docstring""" UpperCAmelCase: str = """ # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git """ UpperCAmelCase: Any = [{"""type""": """code""", """content""": INSTALL_CONTENT}] UpperCAmelCase: int = { """{processor_class}""": """FakeProcessorClass""", """{model_class}""": """FakeModelClass""", """{object_class}""": """FakeObjectClass""", }
336
1
"""simple docstring""" import tempfile import unittest import numpy as np from diffusers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionPipeline, PNDMScheduler, ) from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class UpperCamelCase ( snake_case , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline" def lowerCamelCase__ ( self ,UpperCAmelCase_=0 ): _lowercase : Optional[int] = np.random.RandomState(UpperCAmelCase_ ) _lowercase : Union[str, Any] = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def lowerCamelCase__ ( self ): _lowercase : Any = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) _lowercase : List[Any] = self.get_dummy_inputs() _lowercase : List[Any] = pipe(**UpperCAmelCase_ ).images _lowercase : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) _lowercase : Tuple = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def lowerCamelCase__ ( self ): _lowercase : Tuple = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" ) _lowercase : Dict = PNDMScheduler.from_config(pipe.scheduler.config ,skip_prk_steps=UpperCAmelCase_ ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) _lowercase : Union[str, Any] = self.get_dummy_inputs() _lowercase : Optional[Any] = pipe(**UpperCAmelCase_ ).images _lowercase : int = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) _lowercase : int = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def lowerCamelCase__ ( self ): _lowercase : str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" ) _lowercase : Optional[int] = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) _lowercase : Dict = self.get_dummy_inputs() _lowercase : Optional[int] = pipe(**UpperCAmelCase_ ).images _lowercase : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) _lowercase : List[Any] = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def lowerCamelCase__ ( self ): _lowercase : Tuple = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" ) _lowercase : Dict = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) _lowercase : str = self.get_dummy_inputs() _lowercase : str = pipe(**UpperCAmelCase_ ).images _lowercase : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) _lowercase : str = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 
0.44279] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def lowerCamelCase__ ( self ): _lowercase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" ) _lowercase : Any = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) _lowercase : Optional[Any] = self.get_dummy_inputs() _lowercase : Union[str, Any] = pipe(**UpperCAmelCase_ ).images _lowercase : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) _lowercase : int = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def lowerCamelCase__ ( self ): _lowercase : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" ) _lowercase : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) _lowercase : List[Any] = self.get_dummy_inputs() _lowercase : int = pipe(**UpperCAmelCase_ ).images _lowercase : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) _lowercase : List[str] = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def lowerCamelCase__ ( self ): _lowercase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) _lowercase : str = self.get_dummy_inputs() _lowercase : Optional[Any] = 3 * [inputs["""prompt"""]] # forward _lowercase : str = pipe(**UpperCAmelCase_ ) _lowercase : Optional[int] = output.images[0, -3:, -3:, -1] _lowercase : List[str] = self.get_dummy_inputs() _lowercase : List[Any] = 3 * [inputs.pop("""prompt""" )] _lowercase : Optional[int] = pipe.tokenizer( UpperCAmelCase_ ,padding="""max_length""" ,max_length=pipe.tokenizer.model_max_length ,truncation=UpperCAmelCase_ ,return_tensors="""np""" ,) _lowercase : Tuple = text_inputs["""input_ids"""] _lowercase : int = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] _lowercase : str = prompt_embeds # forward _lowercase : List[Any] = pipe(**UpperCAmelCase_ ) _lowercase : Tuple = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4 def lowerCamelCase__ ( self ): _lowercase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) _lowercase : Tuple = self.get_dummy_inputs() _lowercase : List[Any] = 3 * ["""this is a negative prompt"""] _lowercase : int = negative_prompt _lowercase : Tuple = 3 * [inputs["""prompt"""]] # forward _lowercase : Dict = pipe(**UpperCAmelCase_ ) _lowercase : List[str] = output.images[0, -3:, -3:, -1] _lowercase : Optional[int] = self.get_dummy_inputs() _lowercase : Optional[int] = 3 * [inputs.pop("""prompt""" )] _lowercase : Optional[Any] = [] for p in [prompt, negative_prompt]: _lowercase : List[Any] = pipe.tokenizer( UpperCAmelCase_ ,padding="""max_length""" ,max_length=pipe.tokenizer.model_max_length ,truncation=UpperCAmelCase_ ,return_tensors="""np""" ,) _lowercase : Optional[Any] = text_inputs["""input_ids"""] embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] ) _lowercase , 
_lowercase : Dict = embeds # forward _lowercase : List[Any] = pipe(**UpperCAmelCase_ ) _lowercase : Any = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4 @nightly @require_onnxruntime @require_torch_gpu class UpperCamelCase ( unittest.TestCase ): """simple docstring""" @property def lowerCamelCase__ ( self ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def lowerCamelCase__ ( self ): _lowercase : Optional[Any] = ort.SessionOptions() _lowercase : Any = False return options def lowerCamelCase__ ( self ): # using the PNDM scheduler by default _lowercase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" ,revision="""onnx""" ,safety_checker=UpperCAmelCase_ ,feature_extractor=UpperCAmelCase_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) _lowercase : List[Any] = """A painting of a squirrel eating a burger""" np.random.seed(0 ) _lowercase : List[Any] = sd_pipe([prompt] ,guidance_scale=6.0 ,num_inference_steps=10 ,output_type="""np""" ) _lowercase : Union[str, Any] = output.images _lowercase : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) _lowercase : Optional[int] = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def lowerCamelCase__ ( self ): _lowercase : Any = DDIMScheduler.from_pretrained( """runwayml/stable-diffusion-v1-5""" ,subfolder="""scheduler""" ,revision="""onnx""" ) _lowercase : Tuple = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" ,revision="""onnx""" ,scheduler=UpperCAmelCase_ ,safety_checker=UpperCAmelCase_ ,feature_extractor=UpperCAmelCase_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) _lowercase : Dict = """open neural network exchange""" _lowercase : Optional[Any] = np.random.RandomState(0 ) _lowercase : Union[str, Any] = sd_pipe([prompt] ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=UpperCAmelCase_ ,output_type="""np""" ) _lowercase : Optional[Any] = output.images _lowercase : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) _lowercase : Union[str, Any] = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def lowerCamelCase__ ( self ): _lowercase : str = LMSDiscreteScheduler.from_pretrained( """runwayml/stable-diffusion-v1-5""" ,subfolder="""scheduler""" ,revision="""onnx""" ) _lowercase : List[str] = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" ,revision="""onnx""" ,scheduler=UpperCAmelCase_ ,safety_checker=UpperCAmelCase_ ,feature_extractor=UpperCAmelCase_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) _lowercase : Dict = """open neural network exchange""" _lowercase : Dict = np.random.RandomState(0 ) _lowercase : List[Any] = sd_pipe([prompt] ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=UpperCAmelCase_ ,output_type="""np""" ) _lowercase : Optional[Any] = output.images _lowercase : int = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) _lowercase : Dict = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 
0.5408, 0.5065, 0.6010, 0.6161] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def lowerCamelCase__ ( self ): _lowercase : List[str] = 0 def test_callback_fn(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ) -> None: _lowercase : int = True nonlocal number_of_steps number_of_steps += 1 if step == 0: assert latents.shape == (1, 4, 64, 64) _lowercase : List[str] = latents[0, -3:, -3:, -1] _lowercase : Union[str, Any] = np.array( [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3 elif step == 5: assert latents.shape == (1, 4, 64, 64) _lowercase : str = latents[0, -3:, -3:, -1] _lowercase : List[Any] = np.array( [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3 _lowercase : Tuple = False _lowercase : List[str] = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" ,revision="""onnx""" ,safety_checker=UpperCAmelCase_ ,feature_extractor=UpperCAmelCase_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) _lowercase : Optional[int] = """Andromeda galaxy in a bottle""" _lowercase : Dict = np.random.RandomState(0 ) pipe( prompt=UpperCAmelCase_ ,num_inference_steps=5 ,guidance_scale=7.5 ,generator=UpperCAmelCase_ ,callback=UpperCAmelCase_ ,callback_steps=1 ,) assert test_callback_fn.has_been_called assert number_of_steps == 6 def lowerCamelCase__ ( self ): _lowercase : Dict = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" ,revision="""onnx""" ,safety_checker=UpperCAmelCase_ ,feature_extractor=UpperCAmelCase_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) assert isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ) assert pipe.safety_checker is None _lowercase : Optional[int] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(UpperCAmelCase_ ) _lowercase : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(UpperCAmelCase_ ) # sanity check that the pipeline still works assert pipe.safety_checker is None _lowercase : Any = pipe("""example prompt""" ,num_inference_steps=2 ).images[0] assert image is not None
336
"""simple docstring""" import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL UpperCAmelCase: List[Any] = logging.get_logger(__name__) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): def constraint_to_multiple_of(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=0 , __UpperCAmelCase=None ): _lowercase : Union[str, Any] = round(val / multiple ) * multiple if max_val is not None and x > max_val: _lowercase : str = math.floor(val / multiple ) * multiple if x < min_val: _lowercase : Dict = math.ceil(val / multiple ) * multiple return x _lowercase : List[str] = (output_size, output_size) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else output_size _lowercase , _lowercase : List[Any] = get_image_size(__UpperCAmelCase ) _lowercase , _lowercase : Union[str, Any] = output_size # determine new height and width _lowercase : str = output_height / input_height _lowercase : List[Any] = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width _lowercase : str = scale_width else: # fit height _lowercase : int = scale_height _lowercase : List[Any] = constraint_to_multiple_of(scale_height * input_height , multiple=__UpperCAmelCase ) _lowercase : Optional[Any] = constraint_to_multiple_of(scale_width * input_width , multiple=__UpperCAmelCase ) return (new_height, new_width) class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = ["pixel_values"] def __init__( self ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,UpperCAmelCase_ = PILImageResampling.BILINEAR ,UpperCAmelCase_ = False ,UpperCAmelCase_ = 1 ,UpperCAmelCase_ = True ,UpperCAmelCase_ = 1 / 2_55 ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,): super().__init__(**UpperCAmelCase_ ) _lowercase : List[Any] = size if size is not None else {"""height""": 3_84, """width""": 3_84} _lowercase : str = get_size_dict(UpperCAmelCase_ ) _lowercase : Tuple = do_resize _lowercase : Any = size _lowercase : List[Any] = keep_aspect_ratio _lowercase : Any = ensure_multiple_of _lowercase : str = resample _lowercase : Optional[Any] = do_rescale _lowercase : List[Any] = rescale_factor _lowercase : Union[str, Any] = do_normalize _lowercase : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _lowercase : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = False ,UpperCAmelCase_ = 1 ,UpperCAmelCase_ = PILImageResampling.BICUBIC ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,): _lowercase : Optional[Any] = get_size_dict(UpperCAmelCase_ ) if "height" not in size or "width" not in size: raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. 
Got {size.keys()}""" ) _lowercase : Dict = get_resize_output_image_size( UpperCAmelCase_ ,output_size=(size["""height"""], size["""width"""]) ,keep_aspect_ratio=UpperCAmelCase_ ,multiple=UpperCAmelCase_ ,) return resize(UpperCAmelCase_ ,size=UpperCAmelCase_ ,resample=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,): return rescale(UpperCAmelCase_ ,scale=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,): return normalize(UpperCAmelCase_ ,mean=UpperCAmelCase_ ,std=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = ChannelDimension.FIRST ,**UpperCAmelCase_ ,): _lowercase : Any = do_resize if do_resize is not None else self.do_resize _lowercase : List[str] = size if size is not None else self.size _lowercase : int = get_size_dict(UpperCAmelCase_ ) _lowercase : Any = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio _lowercase : List[str] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of _lowercase : List[str] = resample if resample is not None else self.resample _lowercase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale _lowercase : Any = rescale_factor if rescale_factor is not None else self.rescale_factor _lowercase : str = do_normalize if do_normalize is not None else self.do_normalize _lowercase : Optional[int] = image_mean if image_mean is not None else self.image_mean _lowercase : int = image_std if image_std is not None else self.image_std _lowercase : Union[str, Any] = make_list_of_images(UpperCAmelCase_ ) if not valid_images(UpperCAmelCase_ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. 
_lowercase : int = [to_numpy_array(UpperCAmelCase_ ) for image in images] if do_resize: _lowercase : Union[str, Any] = [self.resize(image=UpperCAmelCase_ ,size=UpperCAmelCase_ ,resample=UpperCAmelCase_ ) for image in images] if do_rescale: _lowercase : int = [self.rescale(image=UpperCAmelCase_ ,scale=UpperCAmelCase_ ) for image in images] if do_normalize: _lowercase : str = [self.normalize(image=UpperCAmelCase_ ,mean=UpperCAmelCase_ ,std=UpperCAmelCase_ ) for image in images] _lowercase : Tuple = [to_channel_dimension_format(UpperCAmelCase_ ,UpperCAmelCase_ ) for image in images] _lowercase : int = {"""pixel_values""": images} return BatchFeature(data=UpperCAmelCase_ ,tensor_type=UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ): _lowercase : Union[str, Any] = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(UpperCAmelCase_ ) != len(UpperCAmelCase_ ): raise ValueError( """Make sure that you pass in as many target sizes as the batch dimension of the logits""" ) if is_torch_tensor(UpperCAmelCase_ ): _lowercase : Tuple = target_sizes.numpy() _lowercase : Optional[Any] = [] for idx in range(len(UpperCAmelCase_ ) ): _lowercase : Dict = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode="""bilinear""" ,align_corners=UpperCAmelCase_ ) _lowercase : Optional[int] = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(UpperCAmelCase_ ) else: _lowercase : Union[str, Any] = logits.argmax(dim=1 ) _lowercase : Optional[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
336
1
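The resize path above snaps the scaled height and width onto a multiple of `ensure_multiple_of` with a small rounding helper; here is that helper on its own, with readable names:

import math


def constrain_to_multiple_of(val, multiple, min_val=0, max_val=None):
    x = round(val / multiple) * multiple
    if max_val is not None and x > max_val:
        x = math.floor(val / multiple) * multiple  # rounded past the cap: round down
    if x < min_val:
        x = math.ceil(val / multiple) * multiple  # rounded below the floor: round up
    return x


# e.g. a 500-pixel edge snapped to the model's multiple of 32:
assert constrain_to_multiple_of(500, 32) == 512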
"""simple docstring""" from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCamelCase : """simple docstring""" def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_=3 ,UpperCAmelCase_=32 ,UpperCAmelCase_=3 ,UpperCAmelCase_=10 ,UpperCAmelCase_=[10, 20, 30, 40] ,UpperCAmelCase_=[1, 1, 2, 1] ,UpperCAmelCase_=True ,UpperCAmelCase_=True ,UpperCAmelCase_="relu" ,UpperCAmelCase_=3 ,UpperCAmelCase_=None ,): _lowercase : List[str] = parent _lowercase : str = batch_size _lowercase : Tuple = image_size _lowercase : Dict = num_channels _lowercase : List[str] = embeddings_size _lowercase : str = hidden_sizes _lowercase : List[Any] = depths _lowercase : Optional[Any] = is_training _lowercase : int = use_labels _lowercase : List[Any] = hidden_act _lowercase : Optional[Any] = num_labels _lowercase : str = scope _lowercase : Tuple = len(UpperCAmelCase_ ) def lowerCamelCase__ ( self ): _lowercase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowercase : List[Any] = None if self.use_labels: _lowercase : Dict = ids_tensor([self.batch_size] ,self.num_labels ) _lowercase : List[Any] = self.get_config() return config, pixel_values, labels def lowerCamelCase__ ( self ): return RegNetConfig( num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ): _lowercase : Dict = TFRegNetModel(config=UpperCAmelCase_ ) _lowercase : Union[str, Any] = model(UpperCAmelCase_ ,training=UpperCAmelCase_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ): _lowercase : Union[str, Any] = self.num_labels _lowercase : Optional[int] = TFRegNetForImageClassification(UpperCAmelCase_ ) _lowercase : Dict = model(UpperCAmelCase_ ,labels=UpperCAmelCase_ ,training=UpperCAmelCase_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def lowerCamelCase__ ( self ): _lowercase : Optional[int] = self.prepare_config_and_inputs() _lowercase , _lowercase , _lowercase : Optional[int] = config_and_inputs _lowercase : Union[str, Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class UpperCamelCase ( snake_case , snake_case , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () SCREAMING_SNAKE_CASE_ : List[Any] = ( {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification} if is_tf_available() else 
{} ) SCREAMING_SNAKE_CASE_ : Optional[Any] = False SCREAMING_SNAKE_CASE_ : str = False SCREAMING_SNAKE_CASE_ : Optional[int] = False SCREAMING_SNAKE_CASE_ : str = False SCREAMING_SNAKE_CASE_ : Any = False def lowerCamelCase__ ( self ): _lowercase : Dict = TFRegNetModelTester(self ) _lowercase : List[Any] = ConfigTester(self ,config_class=UpperCAmelCase_ ,has_text_modality=UpperCAmelCase_ ) def lowerCamelCase__ ( self ): return @unittest.skip(reason="""RegNet does not use inputs_embeds""" ) def lowerCamelCase__ ( self ): pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 ,reason="""TF does not support backprop for grouped convolutions on CPU.""" ,) @slow def lowerCamelCase__ ( self ): super().test_keras_fit() @unittest.skip(reason="""RegNet does not support input and output embeddings""" ) def lowerCamelCase__ ( self ): pass def lowerCamelCase__ ( self ): _lowercase , _lowercase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowercase : Optional[int] = model_class(UpperCAmelCase_ ) _lowercase : Optional[int] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowercase : Optional[Any] = [*signature.parameters.keys()] _lowercase : List[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ): _lowercase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase_ ) def lowerCamelCase__ ( self ): def check_hidden_states_output(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ): _lowercase : List[Any] = model_class(UpperCAmelCase_ ) _lowercase : Optional[Any] = model(**self._prepare_for_class(UpperCAmelCase_ ,UpperCAmelCase_ ) ,training=UpperCAmelCase_ ) _lowercase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _lowercase : Union[str, Any] = self.model_tester.num_stages self.assertEqual(len(UpperCAmelCase_ ) ,expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 2, self.model_tester.image_size // 2] ,) _lowercase , _lowercase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() _lowercase : Optional[int] = ["""basic""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: _lowercase : List[Any] = layer_type _lowercase : List[str] = True check_hidden_states_output(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowercase : List[str] = True check_hidden_states_output(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ): _lowercase , _lowercase : int = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_={} ): _lowercase : Dict = model(UpperCAmelCase_ ,return_dict=UpperCAmelCase_ ,**UpperCAmelCase_ ) _lowercase : Tuple = model(UpperCAmelCase_ ,return_dict=UpperCAmelCase_ ,**UpperCAmelCase_ ).to_tuple() def recursive_check(UpperCAmelCase_ ,UpperCAmelCase_ ): if isinstance(UpperCAmelCase_ ,(List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(UpperCAmelCase_ ,UpperCAmelCase_ ): recursive_check(UpperCAmelCase_ ,UpperCAmelCase_ ) elif tuple_object 
is None: return else: self.assertTrue( all(tf.equal(UpperCAmelCase_ ,UpperCAmelCase_ ) ) ,msg=( """Tuple and dict output are not equal. Difference:""" f""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}""" ) ,) recursive_check(UpperCAmelCase_ ,UpperCAmelCase_ ) for model_class in self.all_model_classes: _lowercase : Any = model_class(UpperCAmelCase_ ) _lowercase : int = self._prepare_for_class(UpperCAmelCase_ ,UpperCAmelCase_ ) _lowercase : Any = self._prepare_for_class(UpperCAmelCase_ ,UpperCAmelCase_ ) check_equivalence(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ) _lowercase : Union[str, Any] = self._prepare_for_class(UpperCAmelCase_ ,UpperCAmelCase_ ,return_labels=UpperCAmelCase_ ) _lowercase : Dict = self._prepare_for_class(UpperCAmelCase_ ,UpperCAmelCase_ ,return_labels=UpperCAmelCase_ ) check_equivalence(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ) _lowercase : Union[str, Any] = self._prepare_for_class(UpperCAmelCase_ ,UpperCAmelCase_ ) _lowercase : Any = self._prepare_for_class(UpperCAmelCase_ ,UpperCAmelCase_ ) check_equivalence(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,{"""output_hidden_states""": True} ) _lowercase : Tuple = self._prepare_for_class(UpperCAmelCase_ ,UpperCAmelCase_ ,return_labels=UpperCAmelCase_ ) _lowercase : Dict = self._prepare_for_class(UpperCAmelCase_ ,UpperCAmelCase_ ,return_labels=UpperCAmelCase_ ) check_equivalence(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,{"""output_hidden_states""": True} ) def lowerCamelCase__ ( self ): _lowercase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_ ) @slow def lowerCamelCase__ ( self ): for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowercase : List[str] = TFRegNetModel.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) def __SCREAMING_SNAKE_CASE ( ): _lowercase : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class UpperCamelCase ( unittest.TestCase ): """simple docstring""" @cached_property def lowerCamelCase__ ( self ): return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowerCamelCase__ ( self ): _lowercase : str = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) _lowercase : int = self.default_image_processor _lowercase : List[str] = prepare_img() _lowercase : Union[str, Any] = image_processor(images=UpperCAmelCase_ ,return_tensors="""tf""" ) # forward pass _lowercase : List[str] = model(**UpperCAmelCase_ ,training=UpperCAmelCase_ ) # verify the logits _lowercase : Tuple = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape ,UpperCAmelCase_ ) _lowercase : Union[str, Any] = tf.constant([-0.4180, -1.5051, -3.4836] ) tf.debugging.assert_near(outputs.logits[0, :3] ,UpperCAmelCase_ ,atol=1E-4 )
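# A minimal, self-contained sketch (plain Python, stand-in values rather than
# real model tensors) of the recursive tuple-vs-dict equivalence check used in
# the RegNet test above: outputs returned as a tuple must match the dict
# output element-wise, descending into nested tuples/lists and skipping None.
def recursive_check(tuple_object, dict_object):
    if isinstance(tuple_object, (list, tuple)):
        for t, d in zip(tuple_object, dict_object):
            recursive_check(t, d)
    elif tuple_object is None:
        return
    else:
        assert tuple_object == dict_object, f"outputs differ: {tuple_object} vs {dict_object}"

# Hypothetical usage: the dict's values, taken in order, should mirror the tuple.
recursive_check((1.0, (2.0, None)), tuple({"logits": 1.0, "hidden": (2.0, None)}.values()))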
"""simple docstring""" import numpy as np import skfuzzy as fuzz if __name__ == "__main__": # Create universe of discourse in Python using linspace () UpperCAmelCase: Optional[Any] = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False) # Create two fuzzy sets by defining any membership function # (trapmf(), gbellmf(), gaussmf(), etc). UpperCAmelCase: Tuple = [0, 25, 50] UpperCAmelCase: List[Any] = [25, 50, 75] UpperCAmelCase: Optional[int] = fuzz.membership.trimf(X, abca) UpperCAmelCase: Any = fuzz.membership.trimf(X, abca) # Compute the different operations using inbuilt functions. UpperCAmelCase: List[Any] = np.ones(75) UpperCAmelCase: Any = np.zeros((75,)) # 1. Union = max(µA(x), µB(x)) UpperCAmelCase: str = fuzz.fuzzy_or(X, young, X, middle_aged)[1] # 2. Intersection = min(µA(x), µB(x)) UpperCAmelCase: Optional[int] = fuzz.fuzzy_and(X, young, X, middle_aged)[1] # 3. Complement (A) = (1- min(µA(x)) UpperCAmelCase: List[Any] = fuzz.fuzzy_not(young) # 4. Difference (A/B) = min(µA(x),(1- µB(x))) UpperCAmelCase: Optional[int] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1] # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))] UpperCAmelCase: int = young + middle_aged - (young * middle_aged) # 6. Algebraic Product = (µA(x) * µB(x)) UpperCAmelCase: int = young * middle_aged # 7. Bounded Sum = min[1,(µA(x), µB(x))] UpperCAmelCase: List[Any] = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1] # 8. Bounded difference = min[0,(µA(x), µB(x))] UpperCAmelCase: int = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1] # max-min composition # max-product composition # Plot each set A, set B and each operation result using plot() and subplot(). from matplotlib import pyplot as plt plt.figure() plt.subplot(4, 3, 1) plt.plot(X, young) plt.title("""Young""") plt.grid(True) plt.subplot(4, 3, 2) plt.plot(X, middle_aged) plt.title("""Middle aged""") plt.grid(True) plt.subplot(4, 3, 3) plt.plot(X, union) plt.title("""union""") plt.grid(True) plt.subplot(4, 3, 4) plt.plot(X, intersection) plt.title("""intersection""") plt.grid(True) plt.subplot(4, 3, 5) plt.plot(X, complement_a) plt.title("""complement_a""") plt.grid(True) plt.subplot(4, 3, 6) plt.plot(X, difference) plt.title("""difference a/b""") plt.grid(True) plt.subplot(4, 3, 7) plt.plot(X, alg_sum) plt.title("""alg_sum""") plt.grid(True) plt.subplot(4, 3, 8) plt.plot(X, alg_product) plt.title("""alg_product""") plt.grid(True) plt.subplot(4, 3, 9) plt.plot(X, bdd_sum) plt.title("""bdd_sum""") plt.grid(True) plt.subplot(4, 3, 10) plt.plot(X, bdd_difference) plt.title("""bdd_difference""") plt.grid(True) plt.subplots_adjust(hspace=0.5) plt.show()
"""simple docstring""" from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf UpperCAmelCase: Any = logging.get_logger(__name__) @dataclass class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : str = [ "no_inference", "no_cuda", "no_tpu", "no_speed", "no_memory", "no_env_print", "no_multi_process", ] def __init__( self ,**UpperCAmelCase_ ): for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: _lowercase : str = deprecated_arg[3:] _lowercase : Optional[int] = not kwargs.pop(UpperCAmelCase_ ) logger.warning( f"""{deprecated_arg} is depreciated. Please use --no-{positive_arg} or""" f""" {positive_arg}={kwargs[positive_arg]}""" ) _lowercase : List[Any] = kwargs.pop("""tpu_name""" ,self.tpu_name ) _lowercase : Optional[Any] = kwargs.pop("""device_idx""" ,self.device_idx ) _lowercase : Dict = kwargs.pop("""eager_mode""" ,self.eager_mode ) _lowercase : List[str] = kwargs.pop("""use_xla""" ,self.use_xla ) super().__init__(**UpperCAmelCase_ ) SCREAMING_SNAKE_CASE_ : str = field( default=snake_case , metadata={"help": "Name of TPU"} , ) SCREAMING_SNAKE_CASE_ : int = field( default=0 , metadata={"help": "CPU / GPU device index. Defaults to 0."} , ) SCREAMING_SNAKE_CASE_ : bool = field(default=snake_case , metadata={"help": "Benchmark models in eager model."} ) SCREAMING_SNAKE_CASE_ : bool = field( default=snake_case , metadata={ "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`." } , ) @cached_property def lowerCamelCase__ ( self ): requires_backends(self ,["""tf"""] ) _lowercase : Union[str, Any] = None if self.tpu: try: if self.tpu_name: _lowercase : List[str] = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: _lowercase : int = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: _lowercase : Optional[int] = None return tpu @cached_property def lowerCamelCase__ ( self ): requires_backends(self ,["""tf"""] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) _lowercase : Union[str, Any] = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] ,"""GPU""" ) _lowercase : str = tf.distribute.OneDeviceStrategy(device=f"""/gpu:{self.device_idx}""" ) else: tf.config.set_visible_devices([] ,"""GPU""" ) # disable GPU _lowercase : Union[str, Any] = tf.distribute.OneDeviceStrategy(device=f"""/cpu:{self.device_idx}""" ) return strategy @property def lowerCamelCase__ ( self ): requires_backends(self ,["""tf"""] ) return self._setup_tpu is not None @property def lowerCamelCase__ ( self ): requires_backends(self ,["""tf"""] ) return self._setup_strategy @property def lowerCamelCase__ ( self ): requires_backends(self ,["""tf"""] ) return tf.config.list_physical_devices("""GPU""" ) @property def lowerCamelCase__ ( self ): requires_backends(self ,["""tf"""] ) if self.cuda: return len(self.gpu_list ) return 0 @property def lowerCamelCase__ ( self ): return self.n_gpu > 0
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class UpperCamelCase ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self ): _lowercase : str = tempfile.mkdtemp() # fmt: off _lowercase : List[Any] = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""] # fmt: on _lowercase : Optional[int] = dict(zip(UpperCAmelCase_ ,range(len(UpperCAmelCase_ ) ) ) ) _lowercase : Optional[int] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""] _lowercase : Optional[int] = {"""unk_token""": """<unk>"""} _lowercase : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] ) _lowercase : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write(json.dumps(UpperCAmelCase_ ) + """\n""" ) with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write("""\n""".join(UpperCAmelCase_ ) ) _lowercase : Dict = { """do_resize""": True, """size""": 20, """do_center_crop""": True, """crop_size""": 18, """do_normalize""": True, """image_mean""": [0.48145466, 0.4578275, 0.40821073], """image_std""": [0.26862954, 0.26130258, 0.27577711], } _lowercase : List[Any] = os.path.join(self.tmpdirname ,UpperCAmelCase_ ) with open(self.image_processor_file ,"""w""" ,encoding="""utf-8""" ) as fp: json.dump(UpperCAmelCase_ ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ,**UpperCAmelCase_ ): return CLIPTokenizer.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,**UpperCAmelCase_ ): return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,**UpperCAmelCase_ ): return CLIPImageProcessor.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ): shutil.rmtree(self.tmpdirname ) def lowerCamelCase__ ( self ): _lowercase : Optional[Any] = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )] _lowercase : Tuple = [Image.fromarray(np.moveaxis(UpperCAmelCase_ ,0 ,-1 ) ) for x in image_inputs] return image_inputs def lowerCamelCase__ ( self ): _lowercase : Union[str, Any] = self.get_tokenizer() _lowercase : List[Any] = self.get_rust_tokenizer() _lowercase : List[Any] = self.get_image_processor() _lowercase : List[str] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) processor_slow.save_pretrained(self.tmpdirname ) _lowercase : Optional[int] = CLIPProcessor.from_pretrained(self.tmpdirname ,use_fast=UpperCAmelCase_ ) _lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) processor_fast.save_pretrained(self.tmpdirname ) _lowercase : List[str] = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() ) 
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer ,UpperCAmelCase_ ) self.assertIsInstance(processor_fast.tokenizer ,UpperCAmelCase_ ) self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor ,UpperCAmelCase_ ) self.assertIsInstance(processor_fast.image_processor ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ): _lowercase : str = CLIPProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _lowercase : Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" ) _lowercase : Optional[int] = self.get_image_processor(do_normalize=UpperCAmelCase_ ,padding_value=1.0 ) _lowercase : int = CLIPProcessor.from_pretrained( self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=UpperCAmelCase_ ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,UpperCAmelCase_ ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ): _lowercase : Optional[Any] = self.get_image_processor() _lowercase : Optional[int] = self.get_tokenizer() _lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) _lowercase : int = self.prepare_image_inputs() _lowercase : str = image_processor(UpperCAmelCase_ ,return_tensors="""np""" ) _lowercase : int = processor(images=UpperCAmelCase_ ,return_tensors="""np""" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() ,input_processor[key].sum() ,delta=1E-2 ) def lowerCamelCase__ ( self ): _lowercase : Optional[Any] = self.get_image_processor() _lowercase : Optional[Any] = self.get_tokenizer() _lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) _lowercase : List[Any] = """lower newer""" _lowercase : Any = processor(text=UpperCAmelCase_ ) _lowercase : Union[str, Any] = tokenizer(UpperCAmelCase_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def lowerCamelCase__ ( self ): _lowercase : Union[str, Any] = self.get_image_processor() _lowercase : List[Any] = self.get_tokenizer() _lowercase : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) _lowercase : str = """lower newer""" _lowercase : List[Any] = self.prepare_image_inputs() _lowercase : Optional[int] = processor(text=UpperCAmelCase_ ,images=UpperCAmelCase_ ) self.assertListEqual(list(inputs.keys() ) ,["""input_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(UpperCAmelCase_ ): processor() def lowerCamelCase__ ( self ): _lowercase : Dict = self.get_image_processor() _lowercase : List[Any] = self.get_tokenizer() _lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) _lowercase : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _lowercase : int = processor.batch_decode(UpperCAmelCase_ ) _lowercase : 
Tuple = tokenizer.batch_decode(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ): _lowercase : Optional[Any] = self.get_image_processor() _lowercase : List[Any] = self.get_tokenizer() _lowercase : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) _lowercase : Optional[Any] = """lower newer""" _lowercase : Any = self.prepare_image_inputs() _lowercase : Optional[int] = processor(text=UpperCAmelCase_ ,images=UpperCAmelCase_ ) self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
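# A minimal sketch of the fixture-writing pattern used in setUp above: a toy
# vocab.json and merges.txt are enough for a CLIP-style BPE tokenizer to load
# from disk. The tokens below are illustrative, not the full fixture.
import json
import os
import tempfile

tmpdir = tempfile.mkdtemp()
vocab = {tok: i for i, tok in enumerate(["l", "o", "w", "lo", "low</w>", "<unk>", "<|startoftext|>", "<|endoftext|>"])}
merges = ["#version: 0.2", "l o", "lo w</w>"]

with open(os.path.join(tmpdir, "vocab.json"), "w", encoding="utf-8") as fp:
    fp.write(json.dumps(vocab) + "\n")
with open(os.path.join(tmpdir, "merges.txt"), "w", encoding="utf-8") as fp:
    fp.write("\n".join(merges))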
"""simple docstring""" def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): return number & 1 == 0 if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int UpperCAmelCase: str = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class UpperCamelCase ( datasets.BuilderConfig ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[datasets.Features] = None def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , ): import pyspark def generate_fn(): _lowercase : List[Any] = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) ) for partition_id in partition_order: _lowercase : Optional[int] = df_with_partition_id.select("""*""" ).where(F"""part_id = {partition_id}""" ).drop("""part_id""" ) _lowercase : int = partition_df.collect() _lowercase : Dict = 0 for row in rows: yield F"""{partition_id}_{row_id}""", row.asDict() row_id += 1 return generate_fn class UpperCamelCase ( _BaseExamplesIterable ): """simple docstring""" def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_=None ,): _lowercase : Union[str, Any] = df _lowercase : List[str] = partition_order or range(self.df.rdd.getNumPartitions() ) _lowercase : Tuple = _generate_iterable_examples(self.df ,self.partition_order ) def __iter__( self ): yield from self.generate_examples_fn() def lowerCamelCase__ ( self ,UpperCAmelCase_ ): _lowercase : List[str] = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(UpperCAmelCase_ ) return SparkExamplesIterable(self.df ,partition_order=UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ): _lowercase : Union[str, Any] = self.split_shard_indices_by_worker(UpperCAmelCase_ ,UpperCAmelCase_ ) return SparkExamplesIterable(self.df ,partition_order=UpperCAmelCase_ ) @property def lowerCamelCase__ ( self ): return len(self.partition_order ) class UpperCamelCase ( datasets.DatasetBuilder ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = SparkConfig def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,): import pyspark _lowercase : List[Any] = pyspark.sql.SparkSession.builder.getOrCreate() _lowercase : List[Any] = df _lowercase : int = working_dir super().__init__( cache_dir=UpperCAmelCase_ ,config_name=str(self.df.semanticHash() ) ,**UpperCAmelCase_ ,) def lowerCamelCase__ ( self ): # Returns the path of the created file. def create_cache_and_write_probe(UpperCAmelCase_ ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(self._cache_dir ,exist_ok=UpperCAmelCase_ ) _lowercase : Union[str, Any] = os.path.join(self._cache_dir ,"""fs_test""" + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(UpperCAmelCase_ ,"""a""" ) return [probe_file] if self._spark.conf.get("""spark.master""" ,"""""" ).startswith("""local""" ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. 
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. if self._cache_dir: _lowercase : List[str] = ( self._spark.sparkContext.parallelize(range(1 ) ,1 ).mapPartitions(UpperCAmelCase_ ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( """When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" ) def lowerCamelCase__ ( self ): return datasets.DatasetInfo(features=self.config.features ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ): return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def lowerCamelCase__ ( self ,UpperCAmelCase_ ): import pyspark def get_arrow_batch_size(UpperCAmelCase_ ): for batch in it: yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} ) _lowercase : List[str] = self.df.count() _lowercase : List[str] = df_num_rows if df_num_rows <= 1_00 else 1_00 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. _lowercase : Union[str, Any] = ( self.df.limit(UpperCAmelCase_ ) .repartition(1 ) .mapInArrow(UpperCAmelCase_ ,"""batch_bytes: long""" ) .agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) ) .collect()[0] .sample_bytes / sample_num_rows ) _lowercase : List[Any] = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. _lowercase : int = min(UpperCAmelCase_ ,int(approx_total_size / max_shard_size ) ) _lowercase : List[Any] = self.df.repartition(UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,): import pyspark _lowercase : Union[str, Any] = ParquetWriter if file_format == """parquet""" else ArrowWriter _lowercase : List[Any] = os.path.join(self._working_dir ,os.path.basename(UpperCAmelCase_ ) ) if self._working_dir else fpath _lowercase : Any = file_format == """parquet""" # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. _lowercase : Union[str, Any] = self.config.features _lowercase : Optional[int] = self._writer_batch_size _lowercase : Optional[Any] = self._fs.storage_options def write_arrow(UpperCAmelCase_ ): # Within the same SparkContext, no two task attempts will share the same attempt ID. _lowercase : Any = pyspark.TaskContext().taskAttemptId() _lowercase : List[str] = next(UpperCAmelCase_ ,UpperCAmelCase_ ) if first_batch is None: # Some partitions might not receive any data. 
return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,) _lowercase : List[Any] = 0 _lowercase : int = writer_class( features=UpperCAmelCase_ ,path=working_fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,writer_batch_size=UpperCAmelCase_ ,storage_options=UpperCAmelCase_ ,embed_local_files=UpperCAmelCase_ ,) _lowercase : Optional[int] = pa.Table.from_batches([first_batch] ) writer.write_table(UpperCAmelCase_ ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: _lowercase , _lowercase : Optional[Any] = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,) shard_id += 1 _lowercase : Union[str, Any] = writer_class( features=writer._features ,path=working_fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,writer_batch_size=UpperCAmelCase_ ,storage_options=UpperCAmelCase_ ,embed_local_files=UpperCAmelCase_ ,) _lowercase : Dict = pa.Table.from_batches([batch] ) writer.write_table(UpperCAmelCase_ ) if writer._num_bytes > 0: _lowercase , _lowercase : Dict = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,) if working_fpath != fpath: for file in os.listdir(os.path.dirname(UpperCAmelCase_ ) ): _lowercase : Dict = os.path.join(os.path.dirname(UpperCAmelCase_ ) ,os.path.basename(UpperCAmelCase_ ) ) shutil.move(UpperCAmelCase_ ,UpperCAmelCase_ ) _lowercase : List[str] = ( self.df.mapInArrow(UpperCAmelCase_ ,"""task_id: long, num_examples: long, num_bytes: long""" ) .groupBy("""task_id""" ) .agg( pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) ,pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) ,pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) ,pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) ,) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = "arrow" ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,): self._validate_cache_dir() _lowercase : Tuple = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(UpperCAmelCase_ ) _lowercase : Optional[int] = not is_remote_filesystem(self._fs ) _lowercase : Dict = os.path.join if is_local else posixpath.join _lowercase : int = """-TTTTT-SSSSS-of-NNNNN""" _lowercase : Optional[Any] = f"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}""" _lowercase : Dict = path_join(self._output_dir ,UpperCAmelCase_ ) _lowercase : List[Any] = 0 _lowercase : Optional[Any] = 0 _lowercase : int = 0 _lowercase : Any = [] _lowercase : Any = [] for task_id, content in self._prepare_split_single(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ): ( ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ) : Tuple = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(UpperCAmelCase_ ) _lowercase : Optional[int] = total_num_examples _lowercase : List[Any] = total_num_bytes # should rename 
everything at the end logger.debug(f"""Renaming {total_shards} shards.""" ) if total_shards > 1: _lowercase : List[Any] = all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the SparkContext. _lowercase : Union[str, Any] = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,): rename( UpperCAmelCase_ ,fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,fpath.replace("""TTTTT-SSSSS""" ,f"""{global_shard_id:05d}""" ).replace("""NNNNN""" ,f"""{total_shards:05d}""" ) ,) _lowercase : Optional[Any] = [] _lowercase : List[str] = 0 for i in range(len(UpperCAmelCase_ ) ): _lowercase , _lowercase : List[str] = task_id_and_num_shards[i] for shard_id in range(UpperCAmelCase_ ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(UpperCAmelCase_ ,len(UpperCAmelCase_ ) ).map(lambda UpperCAmelCase_ : _rename_shard(*UpperCAmelCase_ ) ).collect() else: # don't use any pattern _lowercase : Tuple = 0 _lowercase : Optional[Any] = task_id_and_num_shards[0][0] self._rename( fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,fpath.replace(UpperCAmelCase_ ,"""""" ) ,) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,): return SparkExamplesIterable(self.df )
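# A pure-Python illustration of the shard-naming scheme used above: workers
# write files with TTTTT (task id) and SSSSS (shard id) placeholders, which
# are later rewritten to a global -SSSSS-of-NNNNN numbering. The file name
# below is a made-up example.
fpath = "dataset-train-TTTTT-SSSSS-of-NNNNN.arrow"

def worker_path(task_id: int, shard_id: int) -> str:
    return fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}")

def final_path(global_shard_id: int, total_shards: int) -> str:
    return fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}")

print(worker_path(task_id=3, shard_id=1))              # dataset-train-00003-00001-of-NNNNN.arrow
print(final_path(global_shard_id=7, total_shards=12))  # dataset-train-00007-of-00012.arrow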
"""simple docstring""" from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCamelCase : """simple docstring""" def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_=13 ,UpperCAmelCase_=30 ,UpperCAmelCase_=2 ,UpperCAmelCase_=3 ,UpperCAmelCase_=True ,UpperCAmelCase_=True ,UpperCAmelCase_=32 ,UpperCAmelCase_=2 ,UpperCAmelCase_=4 ,UpperCAmelCase_=37 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=10 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=3 ,UpperCAmelCase_=0.6 ,UpperCAmelCase_=None ,): _lowercase : Dict = parent _lowercase : List[Any] = batch_size _lowercase : List[str] = image_size _lowercase : Any = patch_size _lowercase : Union[str, Any] = num_channels _lowercase : Optional[int] = is_training _lowercase : int = use_labels _lowercase : List[str] = hidden_size _lowercase : Optional[Any] = num_hidden_layers _lowercase : List[Any] = num_attention_heads _lowercase : List[Any] = intermediate_size _lowercase : List[Any] = hidden_act _lowercase : List[Any] = hidden_dropout_prob _lowercase : int = attention_probs_dropout_prob _lowercase : Union[str, Any] = type_sequence_label_size _lowercase : Tuple = initializer_range _lowercase : Optional[int] = mask_ratio _lowercase : List[str] = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) _lowercase : str = (image_size // patch_size) ** 2 _lowercase : Union[str, Any] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def lowerCamelCase__ ( self ): _lowercase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowercase : str = None if self.use_labels: _lowercase : List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) _lowercase : Optional[int] = self.get_config() return config, pixel_values, labels def lowerCamelCase__ ( self ): return ViTMAEConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,decoder_hidden_size=self.hidden_size ,decoder_num_hidden_layers=self.num_hidden_layers ,decoder_num_attention_heads=self.num_attention_heads ,decoder_intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=UpperCAmelCase_ ,initializer_range=self.initializer_range ,mask_ratio=self.mask_ratio ,) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ): _lowercase : Any = TFViTMAEModel(config=UpperCAmelCase_ ) _lowercase : Tuple = model(UpperCAmelCase_ ,training=UpperCAmelCase_ ) 
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ): _lowercase : int = TFViTMAEForPreTraining(UpperCAmelCase_ ) _lowercase : Any = model(UpperCAmelCase_ ,training=UpperCAmelCase_ ) # expected sequence length = num_patches _lowercase : Optional[int] = (self.image_size // self.patch_size) ** 2 _lowercase : Optional[Any] = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) ) # test greyscale images _lowercase : List[Any] = 1 _lowercase : List[Any] = TFViTMAEForPreTraining(UpperCAmelCase_ ) _lowercase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _lowercase : Tuple = model(UpperCAmelCase_ ,training=UpperCAmelCase_ ) _lowercase : Optional[Any] = self.patch_size**2 self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) ) def lowerCamelCase__ ( self ): _lowercase : int = self.prepare_config_and_inputs() ((_lowercase) , (_lowercase) , (_lowercase)) : Union[str, Any] = config_and_inputs _lowercase : Tuple = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class UpperCamelCase ( snake_case , snake_case , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () SCREAMING_SNAKE_CASE_ : Optional[Any] = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {} SCREAMING_SNAKE_CASE_ : str = False SCREAMING_SNAKE_CASE_ : List[str] = False SCREAMING_SNAKE_CASE_ : str = False SCREAMING_SNAKE_CASE_ : int = False def lowerCamelCase__ ( self ): _lowercase : Optional[Any] = TFViTMAEModelTester(self ) _lowercase : Optional[int] = ConfigTester(self ,config_class=UpperCAmelCase_ ,has_text_modality=UpperCAmelCase_ ,hidden_size=37 ) def lowerCamelCase__ ( self ): self.config_tester.run_common_tests() @unittest.skip(reason="""ViTMAE does not use inputs_embeds""" ) def lowerCamelCase__ ( self ): pass def lowerCamelCase__ ( self ): _lowercase , _lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowercase : str = model_class(UpperCAmelCase_ ) self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer) ) _lowercase : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCAmelCase_ ,tf.keras.layers.Layer ) ) def lowerCamelCase__ ( self ): _lowercase , _lowercase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowercase : List[str] = model_class(UpperCAmelCase_ ) _lowercase : int = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowercase : Optional[Any] = [*signature.parameters.keys()] _lowercase : Any = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ): _lowercase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase_ ) def lowerCamelCase__ ( self ): _lowercase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase_ ) def lowerCamelCase__ ( self ): # make the mask reproducible np.random.seed(2 ) _lowercase , _lowercase : Union[str, Any] = 
self.model_tester.prepare_config_and_inputs_for_common() _lowercase : Optional[Any] = int((config.image_size // config.patch_size) ** 2 ) _lowercase : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: _lowercase : int = model_class(UpperCAmelCase_ ) _lowercase : str = self._prepare_for_class(UpperCAmelCase_ ,UpperCAmelCase_ ) _lowercase : Dict = model(UpperCAmelCase_ ,noise=UpperCAmelCase_ ) _lowercase : str = copy.deepcopy(self._prepare_for_class(UpperCAmelCase_ ,UpperCAmelCase_ ) ) _lowercase : str = model(**UpperCAmelCase_ ,noise=UpperCAmelCase_ ) _lowercase : List[str] = outputs_dict[0].numpy() _lowercase : Optional[int] = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) ,1E-6 ) def lowerCamelCase__ ( self ): # make the mask reproducible np.random.seed(2 ) _lowercase , _lowercase : int = self.model_tester.prepare_config_and_inputs_for_common() _lowercase : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 ) _lowercase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(UpperCAmelCase_ ): _lowercase : List[Any] = {} for k, v in inputs_dict.items(): if tf.is_tensor(UpperCAmelCase_ ): _lowercase : Union[str, Any] = v.numpy() else: _lowercase : Optional[int] = np.array(UpperCAmelCase_ ) return inputs_np_dict for model_class in self.all_model_classes: _lowercase : Optional[int] = model_class(UpperCAmelCase_ ) _lowercase : List[Any] = self._prepare_for_class(UpperCAmelCase_ ,UpperCAmelCase_ ) _lowercase : Optional[Any] = prepare_numpy_arrays(UpperCAmelCase_ ) _lowercase : List[Any] = model(UpperCAmelCase_ ,noise=UpperCAmelCase_ ) _lowercase : Optional[int] = model(**UpperCAmelCase_ ,noise=UpperCAmelCase_ ) self.assert_outputs_same(UpperCAmelCase_ ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ): # make masks reproducible np.random.seed(2 ) _lowercase : Union[str, Any] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) _lowercase : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) _lowercase : Optional[int] = tf.constant(UpperCAmelCase_ ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument _lowercase : int = tf_noise super().check_pt_tf_models(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ): # make mask reproducible np.random.seed(2 ) _lowercase , _lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() _lowercase : str = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(UpperCAmelCase_ ) if module_member_name.endswith("""MainLayer""" ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. 
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )] for module_member in (getattr(UpperCAmelCase_ ,UpperCAmelCase_ ),) if isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(UpperCAmelCase_ ,"""_keras_serializable""" ,UpperCAmelCase_ ) } _lowercase : List[str] = int((config.image_size // config.patch_size) ** 2 ) _lowercase : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) _lowercase : Dict = tf.convert_to_tensor(UpperCAmelCase_ ) inputs_dict.update({"""noise""": noise} ) for main_layer_class in tf_main_layer_classes: _lowercase : Tuple = main_layer_class(UpperCAmelCase_ ) _lowercase : int = { name: tf.keras.Input(tensor.shape[1:] ,dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } _lowercase : List[str] = tf.keras.Model(UpperCAmelCase_ ,outputs=main_layer(UpperCAmelCase_ ) ) _lowercase : str = model(UpperCAmelCase_ ) with tempfile.TemporaryDirectory() as tmpdirname: _lowercase : Optional[Any] = os.path.join(UpperCAmelCase_ ,"""keras_model.h5""" ) model.save(UpperCAmelCase_ ) _lowercase : Optional[int] = tf.keras.models.load_model( UpperCAmelCase_ ,custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(UpperCAmelCase_ ,tf.keras.Model ) _lowercase : Optional[Any] = model(UpperCAmelCase_ ) self.assert_outputs_same(UpperCAmelCase_ ,UpperCAmelCase_ ) @slow def lowerCamelCase__ ( self ): # make mask reproducible np.random.seed(2 ) _lowercase , _lowercase : int = self.model_tester.prepare_config_and_inputs_for_common() _lowercase : Tuple = int((config.image_size // config.patch_size) ** 2 ) _lowercase : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: _lowercase : Optional[Any] = model_class(UpperCAmelCase_ ) _lowercase : str = self._prepare_for_class(UpperCAmelCase_ ,UpperCAmelCase_ ) _lowercase : str = model(UpperCAmelCase_ ,noise=UpperCAmelCase_ ) if model_class.__name__ == "TFViTMAEModel": _lowercase : Union[str, Any] = outputs.last_hidden_state.numpy() _lowercase : int = 0 else: _lowercase : Union[str, Any] = outputs.logits.numpy() _lowercase : Union[str, Any] = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCAmelCase_ ,saved_model=UpperCAmelCase_ ) _lowercase : Union[str, Any] = model_class.from_pretrained(UpperCAmelCase_ ) _lowercase : Union[str, Any] = model(UpperCAmelCase_ ,noise=UpperCAmelCase_ ) if model_class.__name__ == "TFViTMAEModel": _lowercase : Any = after_outputs["""last_hidden_state"""].numpy() _lowercase : Dict = 0 else: _lowercase : Dict = after_outputs["""logits"""].numpy() _lowercase : str = 0 _lowercase : int = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(UpperCAmelCase_ ,1E-5 ) def lowerCamelCase__ ( self ): # make mask reproducible np.random.seed(2 ) _lowercase , _lowercase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _lowercase : Any = int((config.image_size // config.patch_size) ** 2 ) _lowercase : Any = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: _lowercase : int = model_class(UpperCAmelCase_ ) _lowercase : Optional[Any] = self._prepare_for_class(UpperCAmelCase_ ,UpperCAmelCase_ ) _lowercase : int = model(UpperCAmelCase_ ,noise=UpperCAmelCase_ ) _lowercase : Optional[int] = model.get_config() # make sure that returned config is jsonifiable, which is required by keras 
json.dumps(UpperCAmelCase_ ) _lowercase : Tuple = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config _lowercase : Any = model_class.from_config(model.config ) _lowercase : List[str] = new_model(UpperCAmelCase_ ) # Build model new_model.set_weights(model.get_weights() ) _lowercase : int = new_model(UpperCAmelCase_ ,noise=UpperCAmelCase_ ) self.assert_outputs_same(UpperCAmelCase_ ,UpperCAmelCase_ ) @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def lowerCamelCase__ ( self ): pass @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" ) def lowerCamelCase__ ( self ): pass @slow def lowerCamelCase__ ( self ): _lowercase : Any = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" ) self.assertIsNotNone(UpperCAmelCase_ ) def __SCREAMING_SNAKE_CASE ( ): _lowercase : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class UpperCamelCase ( unittest.TestCase ): """simple docstring""" @cached_property def lowerCamelCase__ ( self ): return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None @slow def lowerCamelCase__ ( self ): # make random mask reproducible across the PT and TF model np.random.seed(2 ) _lowercase : Any = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ) _lowercase : Optional[int] = self.default_image_processor _lowercase : Optional[int] = prepare_img() _lowercase : List[str] = image_processor(images=UpperCAmelCase_ ,return_tensors="""tf""" ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) _lowercase : Tuple = ViTMAEConfig() _lowercase : Any = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) _lowercase : Union[str, Any] = np.random.uniform(size=(1, num_patches) ) # forward pass _lowercase : Union[str, Any] = model(**UpperCAmelCase_ ,noise=UpperCAmelCase_ ) # verify the logits _lowercase : List[str] = tf.convert_to_tensor([1, 1_96, 7_68] ) self.assertEqual(outputs.logits.shape ,UpperCAmelCase_ ) _lowercase : int = tf.convert_to_tensor( [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3] ,UpperCAmelCase_ ,atol=1E-4 )
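# The sequence-length arithmetic assumed throughout the ViTMAE tests, as a
# worked example with the tester's defaults: num_patches is
# (image_size // patch_size) ** 2, and after masking the encoder sees
# ceil((1 - mask_ratio) * (num_patches + 1)) tokens (+1 for the [CLS] token).
import math

image_size, patch_size, mask_ratio = 30, 2, 0.6
num_patches = (image_size // patch_size) ** 2                      # 15 ** 2 = 225
seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))  # ceil(0.4 * 226) = 91
print(num_patches, seq_length)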
"""simple docstring""" import unittest from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin UpperCAmelCase: Optional[int] = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class UpperCamelCase ( snake_case , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = XLNetTokenizer SCREAMING_SNAKE_CASE_ : Dict = XLNetTokenizerFast SCREAMING_SNAKE_CASE_ : int = True SCREAMING_SNAKE_CASE_ : Union[str, Any] = True def lowerCamelCase__ ( self ): super().setUp() # We have a SentencePiece fixture for testing _lowercase : Dict = XLNetTokenizer(UpperCAmelCase_ ,keep_accents=UpperCAmelCase_ ) tokenizer.sanitize_special_tokens() tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase__ ( self ): _lowercase : Union[str, Any] = """<s>""" _lowercase : List[Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) ,UpperCAmelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ): _lowercase : str = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,"""<unk>""" ) self.assertEqual(vocab_keys[1] ,"""<s>""" ) self.assertEqual(vocab_keys[-1] ,"""<eod>""" ) self.assertEqual(len(UpperCAmelCase_ ) ,10_06 ) def lowerCamelCase__ ( self ): self.assertEqual(self.get_tokenizer().vocab_size ,10_00 ) def lowerCamelCase__ ( self ): _lowercase : int = XLNetTokenizer(UpperCAmelCase_ ,keep_accents=UpperCAmelCase_ ) _lowercase : Union[str, Any] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(UpperCAmelCase_ ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) ,[2_85, 46, 10, 1_70, 3_82] ) _lowercase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( UpperCAmelCase_ ,[ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] ,) _lowercase : List[Any] = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ ,[8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] ) _lowercase : Optional[Any] = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ ) self.assertListEqual( UpperCAmelCase_ ,[ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] ,) def lowerCamelCase__ ( self ): _lowercase : Dict = XLNetTokenizer(UpperCAmelCase_ ,do_lower_case=UpperCAmelCase_ ) _lowercase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( UpperCAmelCase_ ,[ SPIECE_UNDERLINE + """""", """i""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + 
"""in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """se""", """.""", ] ,) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""▁he""", """ll""", """o"""] ) def lowerCamelCase__ ( self ): _lowercase : int = XLNetTokenizer(UpperCAmelCase_ ,do_lower_case=UpperCAmelCase_ ) _lowercase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( UpperCAmelCase_ ,[ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """se""", """.""", ] ,) @slow def lowerCamelCase__ ( self ): _lowercase : Dict = XLNetTokenizer.from_pretrained("""xlnet-base-cased""" ) _lowercase : int = tokenizer.encode("""sequence builders""" ,add_special_tokens=UpperCAmelCase_ ) _lowercase : List[str] = tokenizer.encode("""multi-sequence build""" ,add_special_tokens=UpperCAmelCase_ ) _lowercase : Optional[int] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ ) _lowercase : Dict = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ ,UpperCAmelCase_ ) assert encoded_sentence == text + [4, 3] assert encoded_pair == text + [4] + text_a + [4, 3] @slow def lowerCamelCase__ ( self ): # fmt: off _lowercase : Union[str, Any] = {"""input_ids""": [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase_ ,model_name="""xlnet-base-cased""" ,revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" ,)
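# A pure-Python sketch of the XLNet special-token layout checked in the slow
# test above: a single sequence is followed by [sep, cls] (ids 4 and 3 in the
# assertions), and a pair inserts an extra sep between the two segments.
SEP, CLS = 4, 3  # ids taken from the assertions above

def build_inputs(text_ids, pair_ids=None):
    if pair_ids is None:
        return text_ids + [SEP, CLS]
    return text_ids + [SEP] + pair_ids + [SEP, CLS]

print(build_inputs([10, 11]))            # [10, 11, 4, 3]
print(build_inputs([10, 11], [20, 21]))  # [10, 11, 4, 20, 21, 4, 3]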
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) UpperCAmelCase: Optional[int] = { """configuration_convnext""": ["""CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvNextConfig""", """ConvNextOnnxConfig"""] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase: str = ["""ConvNextFeatureExtractor"""] UpperCAmelCase: Union[str, Any] = ["""ConvNextImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase: List[str] = [ """CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST""", """ConvNextForImageClassification""", """ConvNextModel""", """ConvNextPreTrainedModel""", """ConvNextBackbone""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase: Optional[Any] = [ """TFConvNextForImageClassification""", """TFConvNextModel""", """TFConvNextPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_convnext import ConvNextFeatureExtractor from .image_processing_convnext import ConvNextImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convnext import ( CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvNextBackbone, ConvNextForImageClassification, ConvNextModel, ConvNextPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel else: import sys UpperCAmelCase: Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
336
"""simple docstring""" def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): return " ".join(input_str.split()[::-1] ) if __name__ == "__main__": import doctest doctest.testmod()
336
1
"""simple docstring""" def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or number < 0: raise ValueError("""Input must be a non-negative integer""" ) _lowercase : Union[str, Any] = 0 while number: # This way we arrive at next set bit (next 1) instead of looping # through each bit and checking for 1s hence the # loop won't run 32 times it will only run the number of `1` times number &= number - 1 count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
336
"""simple docstring""" import inspect import re from hashlib import shaaaa from typing import Dict, List from .arrow import arrow from .audiofolder import audiofolder from .csv import csv from .imagefolder import imagefolder from .json import json from .pandas import pandas from .parquet import parquet from .sql import sql # noqa F401 from .text import text def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase : int = [] for line in lines: _lowercase : Dict = re.sub(R"""#.*""" , """""" , __UpperCAmelCase ) # remove comments if line: filtered_lines.append(__UpperCAmelCase ) _lowercase : Tuple = """\n""".join(__UpperCAmelCase ) # Make a hash from all this code _lowercase : Tuple = full_str.encode("""utf-8""" ) return shaaaa(__UpperCAmelCase ).hexdigest() # get importable module names and hash for caching UpperCAmelCase: Tuple = { """csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())), """json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())), """pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())), """parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())), """arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())), """text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())), """imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())), """audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())), } # Used to infer the module to use based on the data files extensions UpperCAmelCase: List[str] = { """.csv""": ("""csv""", {}), """.tsv""": ("""csv""", {"""sep""": """\t"""}), """.json""": ("""json""", {}), """.jsonl""": ("""json""", {}), """.parquet""": ("""parquet""", {}), """.arrow""": ("""arrow""", {}), """.txt""": ("""text""", {}), } _EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) UpperCAmelCase: Any = {"""imagefolder""", """audiofolder"""} # Used to filter data files based on extensions given a module name UpperCAmelCase: Dict[str, List[str]] = {} for _ext, (_module, _) in _EXTENSION_TO_MODULE.items(): _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext) _MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""") _MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
336
1
"""simple docstring""" def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ): _lowercase : Optional[int] = 0 _lowercase : Tuple = len(__UpperCAmelCase ) - 1 while left <= right: # avoid divided by 0 during interpolation if sorted_collection[left] == sorted_collection[right]: if sorted_collection[left] == item: return left else: return None _lowercase : List[str] = left + ((item - sorted_collection[left]) * (right - left)) // ( sorted_collection[right] - sorted_collection[left] ) # out of range check if point < 0 or point >= len(__UpperCAmelCase ): return None _lowercase : Optional[Any] = sorted_collection[point] if current_item == item: return point else: if point < left: _lowercase : Any = left _lowercase : Optional[Any] = point elif point > right: _lowercase : str = right _lowercase : List[str] = point else: if item < current_item: _lowercase : Tuple = point - 1 else: _lowercase : Any = point + 1 return None def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): # avoid divided by 0 during interpolation if sorted_collection[left] == sorted_collection[right]: if sorted_collection[left] == item: return left else: return None _lowercase : List[Any] = left + ((item - sorted_collection[left]) * (right - left)) // ( sorted_collection[right] - sorted_collection[left] ) # out of range check if point < 0 or point >= len(__UpperCAmelCase ): return None if sorted_collection[point] == item: return point elif point < left: return interpolation_search_by_recursion(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) elif point > right: return interpolation_search_by_recursion(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) else: if sorted_collection[point] > item: return interpolation_search_by_recursion( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , point - 1 ) else: return interpolation_search_by_recursion( __UpperCAmelCase , __UpperCAmelCase , point + 1 , __UpperCAmelCase ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): if collection != sorted(__UpperCAmelCase ): raise ValueError("""Collection must be ascending sorted""" ) return True if __name__ == "__main__": import sys UpperCAmelCase: List[Any] = 0 if debug == 1: UpperCAmelCase: int = [10, 30, 40, 45, 50, 66, 77, 93] try: __assert_sorted(collection) except ValueError: sys.exit("""Sequence must be ascending sorted to apply interpolation search""") UpperCAmelCase: Optional[Any] = 67 UpperCAmelCase: int = interpolation_search(collection, target) if result is not None: print(F'{target} found at positions: {result}') else: print("""Not found""")
336
"""simple docstring""" from .configuration_bert_masked import MaskedBertConfig from .modeling_bert_masked import ( MaskedBertForMultipleChoice, MaskedBertForQuestionAnswering, MaskedBertForSequenceClassification, MaskedBertForTokenClassification, MaskedBertModel, ) from .modules import *
336
1
"""simple docstring""" from __future__ import annotations from collections import deque from collections.abc import Iterator from dataclasses import dataclass @dataclass class UpperCamelCase : """simple docstring""" SCREAMING_SNAKE_CASE_ : int SCREAMING_SNAKE_CASE_ : int class UpperCamelCase : """simple docstring""" def __init__( self ,UpperCAmelCase_ ): _lowercase : list[list[Edge]] = [[] for _ in range(UpperCAmelCase_ )] _lowercase : Any = size def __getitem__( self ,UpperCAmelCase_ ): return iter(self._graph[vertex] ) @property def lowerCamelCase__ ( self ): return self._size def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ): if weight not in (0, 1): raise ValueError("""Edge weight must be either 0 or 1.""" ) if to_vertex < 0 or to_vertex >= self.size: raise ValueError("""Vertex indexes must be in [0; size).""" ) self._graph[from_vertex].append(Edge(UpperCAmelCase_ ,UpperCAmelCase_ ) ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ): _lowercase : Union[str, Any] = deque([start_vertex] ) _lowercase : list[int | None] = [None] * self.size _lowercase : Dict = 0 while queue: _lowercase : Tuple = queue.popleft() _lowercase : Dict = distances[current_vertex] if current_distance is None: continue for edge in self[current_vertex]: _lowercase : Dict = current_distance + edge.weight _lowercase : List[Any] = distances[edge.destination_vertex] if ( isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ) and new_distance >= dest_vertex_distance ): continue _lowercase : Optional[int] = new_distance if edge.weight == 0: queue.appendleft(edge.destination_vertex ) else: queue.append(edge.destination_vertex ) if distances[finish_vertex] is None: raise ValueError("""No path from start_vertex to finish_vertex.""" ) return distances[finish_vertex] if __name__ == "__main__": import doctest doctest.testmod()
336
"""simple docstring""" def __SCREAMING_SNAKE_CASE ( ): return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )] UpperCAmelCase: Any = generate_large_matrix() UpperCAmelCase: Dict = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): assert all(row == sorted(__UpperCAmelCase , reverse=__UpperCAmelCase ) for row in grid ) assert all(list(__UpperCAmelCase ) == sorted(__UpperCAmelCase , reverse=__UpperCAmelCase ) for col in zip(*__UpperCAmelCase ) ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase : Tuple = 0 _lowercase : List[Any] = len(__UpperCAmelCase ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: _lowercase : Tuple = (left + right) // 2 _lowercase : List[Any] = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: _lowercase : Dict = mid + 1 else: _lowercase : Dict = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. return len(__UpperCAmelCase ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase : Any = 0 _lowercase : Optional[int] = len(grid[0] ) for i in range(len(__UpperCAmelCase ) ): _lowercase : Union[str, Any] = find_negative_index(grid[i][:bound] ) total += bound return (len(__UpperCAmelCase ) * len(grid[0] )) - total def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): return len([number for row in grid for number in row if number < 0] ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase : Tuple = 0 for row in grid: for i, number in enumerate(__UpperCAmelCase ): if number < 0: total += len(__UpperCAmelCase ) - i break return total def __SCREAMING_SNAKE_CASE ( ): from timeit import timeit print("""Running benchmarks""" ) _lowercase : Tuple = ( """from __main__ import count_negatives_binary_search, """ """count_negatives_brute_force, count_negatives_brute_force_with_break, grid""" ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): _lowercase : Dict = timeit(F"""{func}(grid=grid)""" , setup=__UpperCAmelCase , number=500 ) print(F"""{func}() took {time:0.4f} seconds""" ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
336
1
"""simple docstring""" from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax UpperCAmelCase: str = logging.get_logger(__name__) @add_end_docstrings(snake_case ) class UpperCamelCase ( snake_case ): """simple docstring""" def __init__( self ,**UpperCAmelCase_ ): super().__init__(**UpperCAmelCase_ ) requires_backends(self ,"""vision""" ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == """tf""" else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self ,UpperCAmelCase_ ,**UpperCAmelCase_ ): return super().__call__(UpperCAmelCase_ ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,**UpperCAmelCase_ ): _lowercase : Union[str, Any] = {} if "candidate_labels" in kwargs: _lowercase : Optional[int] = kwargs["""candidate_labels"""] if "hypothesis_template" in kwargs: _lowercase : Optional[Any] = kwargs["""hypothesis_template"""] return preprocess_params, {}, {} def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_=None ,UpperCAmelCase_="This is a photo of {}." ): _lowercase : Dict = load_image(UpperCAmelCase_ ) _lowercase : Union[str, Any] = self.image_processor(images=[image] ,return_tensors=self.framework ) _lowercase : Dict = candidate_labels _lowercase : List[Any] = [hypothesis_template.format(UpperCAmelCase_ ) for x in candidate_labels] _lowercase : Optional[int] = self.tokenizer(UpperCAmelCase_ ,return_tensors=self.framework ,padding=UpperCAmelCase_ ) _lowercase : Dict = [text_inputs] return inputs def lowerCamelCase__ ( self ,UpperCAmelCase_ ): _lowercase : Optional[int] = model_inputs.pop("""candidate_labels""" ) _lowercase : Tuple = model_inputs.pop("""text_inputs""" ) if isinstance(text_inputs[0] ,UpperCAmelCase_ ): _lowercase : List[Any] = text_inputs[0] else: # Batching case. _lowercase : List[Any] = text_inputs[0][0] _lowercase : Tuple = self.model(**UpperCAmelCase_ ,**UpperCAmelCase_ ) _lowercase : Union[str, Any] = { """candidate_labels""": candidate_labels, """logits""": outputs.logits_per_image, } return model_outputs def lowerCamelCase__ ( self ,UpperCAmelCase_ ): _lowercase : int = model_outputs.pop("""candidate_labels""" ) _lowercase : Tuple = model_outputs["""logits"""][0] if self.framework == "pt": _lowercase : str = logits.softmax(dim=-1 ).squeeze(-1 ) _lowercase : Optional[Any] = probs.tolist() if not isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ): _lowercase : Optional[int] = [scores] elif self.framework == "tf": _lowercase : List[Any] = stable_softmax(UpperCAmelCase_ ,axis=-1 ) _lowercase : Optional[Any] = probs.numpy().tolist() else: raise ValueError(f"""Unsupported framework: {self.framework}""" ) _lowercase : List[Any] = [ {"""score""": score, """label""": candidate_label} for score, candidate_label in sorted(zip(UpperCAmelCase_ ,UpperCAmelCase_ ) ,key=lambda UpperCAmelCase_ : -x[0] ) ] return result
336
"""simple docstring""" import re from filelock import FileLock try: import nltk UpperCAmelCase: List[str] = True except (ImportError, ModuleNotFoundError): UpperCAmelCase: int = False if NLTK_AVAILABLE: with FileLock(""".lock""") as lock: nltk.download("""punkt""", quiet=True) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): re.sub("""<n>""" , """""" , __UpperCAmelCase ) # remove pegasus newline char assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)" return "\n".join(nltk.sent_tokenize(__UpperCAmelCase ) )
336
1
"""simple docstring""" from __future__ import annotations from typing import Any def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): if not postfix_notation: return 0 _lowercase : Optional[Any] = {"""+""", """-""", """*""", """/"""} _lowercase : list[Any] = [] for token in postfix_notation: if token in operations: _lowercase , _lowercase : Any = stack.pop(), stack.pop() if token == "+": stack.append(a + b ) elif token == "-": stack.append(a - b ) elif token == "*": stack.append(a * b ) else: if a * b < 0 and a % b != 0: stack.append(a // b + 1 ) else: stack.append(a // b ) else: stack.append(int(__UpperCAmelCase ) ) return stack.pop() if __name__ == "__main__": import doctest doctest.testmod()
336
"""simple docstring""" import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase=0.9_9_9 , __UpperCAmelCase="cosine" , ): if alpha_transform_type == "cosine": def alpha_bar_fn(__UpperCAmelCase ): return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(__UpperCAmelCase ): return math.exp(t * -1_2.0 ) else: raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) _lowercase : str = [] for i in range(__UpperCAmelCase ): _lowercase : Any = i / num_diffusion_timesteps _lowercase : int = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(__UpperCAmelCase ) / alpha_bar_fn(__UpperCAmelCase ) , __UpperCAmelCase ) ) return torch.tensor(__UpperCAmelCase , dtype=torch.floataa ) class UpperCamelCase ( snake_case , snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = [e.name for e in KarrasDiffusionSchedulers] SCREAMING_SNAKE_CASE_ : str = 2 @register_to_config def __init__( self ,UpperCAmelCase_ = 10_00 ,UpperCAmelCase_ = 0.00085 ,UpperCAmelCase_ = 0.012 ,UpperCAmelCase_ = "linear" ,UpperCAmelCase_ = None ,UpperCAmelCase_ = "epsilon" ,UpperCAmelCase_ = "linspace" ,UpperCAmelCase_ = 0 ,): if trained_betas is not None: _lowercase : str = torch.tensor(UpperCAmelCase_ ,dtype=torch.floataa ) elif beta_schedule == "linear": _lowercase : Optional[Any] = torch.linspace(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. _lowercase : Any = ( torch.linspace(beta_start**0.5 ,beta_end**0.5 ,UpperCAmelCase_ ,dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule _lowercase : Any = betas_for_alpha_bar(UpperCAmelCase_ ) else: raise NotImplementedError(f"""{beta_schedule} does is not implemented for {self.__class__}""" ) _lowercase : Tuple = 1.0 - self.betas _lowercase : Dict = torch.cumprod(self.alphas ,dim=0 ) # set all values self.set_timesteps(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_=None ): if schedule_timesteps is None: _lowercase : Optional[int] = self.timesteps _lowercase : Union[str, Any] = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter ) == 0: _lowercase : Optional[Any] = 1 if len(UpperCAmelCase_ ) > 1 else 0 else: _lowercase : Dict = timestep.cpu().item() if torch.is_tensor(UpperCAmelCase_ ) else timestep _lowercase : List[str] = self._index_counter[timestep_int] return indices[pos].item() @property def lowerCamelCase__ ( self ): # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,): _lowercase : str = self.index_for_timestep(UpperCAmelCase_ ) if self.state_in_first_order: _lowercase : Optional[Any] = self.sigmas[step_index] else: _lowercase : Dict = self.sigmas_interpol[step_index] _lowercase : Optional[int] = sample / ((sigma**2 + 1) ** 0.5) return sample def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,): _lowercase : List[str] = num_inference_steps _lowercase : Dict = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": _lowercase : Dict = np.linspace(0 ,num_train_timesteps - 1 ,UpperCAmelCase_ ,dtype=UpperCAmelCase_ )[::-1].copy() elif self.config.timestep_spacing == "leading": _lowercase : Union[str, Any] = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 _lowercase : str = (np.arange(0 ,UpperCAmelCase_ ) * step_ratio).round()[::-1].copy().astype(UpperCAmelCase_ ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": _lowercase : str = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 _lowercase : Optional[int] = (np.arange(UpperCAmelCase_ ,0 ,-step_ratio )).round().copy().astype(UpperCAmelCase_ ) timesteps -= 1 else: raise ValueError( f"""{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" ) _lowercase : Union[str, Any] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) _lowercase : Optional[Any] = torch.from_numpy(np.log(UpperCAmelCase_ ) ).to(UpperCAmelCase_ ) _lowercase : List[str] = np.interp(UpperCAmelCase_ ,np.arange(0 ,len(UpperCAmelCase_ ) ) ,UpperCAmelCase_ ) _lowercase : int = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) _lowercase : Any = torch.from_numpy(UpperCAmelCase_ ).to(device=UpperCAmelCase_ ) # interpolate sigmas _lowercase : List[str] = sigmas.log().lerp(sigmas.roll(1 ).log() ,0.5 ).exp() _lowercase : Union[str, Any] = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] ) _lowercase : Tuple = torch.cat( [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] ) if str(UpperCAmelCase_ ).startswith("""mps""" ): # mps does not support float64 _lowercase : Tuple = torch.from_numpy(UpperCAmelCase_ ).to(UpperCAmelCase_ ,dtype=torch.floataa ) else: _lowercase : str = torch.from_numpy(UpperCAmelCase_ ).to(UpperCAmelCase_ ) # interpolate timesteps _lowercase : int = self.sigma_to_t(UpperCAmelCase_ ).to(UpperCAmelCase_ ,dtype=timesteps.dtype ) _lowercase : Union[str, Any] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) ,dim=-1 ).flatten() _lowercase : str = torch.cat([timesteps[:1], interleaved_timesteps] ) _lowercase : List[Any] = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter _lowercase : Optional[Any] = defaultdict(UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ): # get log sigma _lowercase : Optional[Any] = sigma.log() # get distribution _lowercase : Optional[int] = log_sigma - self.log_sigmas[:, None] # get sigmas range _lowercase : Tuple = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 ) _lowercase : List[Any] = low_idx + 1 _lowercase : int = self.log_sigmas[low_idx] _lowercase : Any = self.log_sigmas[high_idx] # interpolate sigmas _lowercase : Any = (low - log_sigma) / (low - high) _lowercase : Dict = w.clamp(0 ,1 ) # transform interpolation to time range _lowercase : List[str] = (1 - w) * low_idx + w * high_idx _lowercase : Optional[int] = t.view(sigma.shape ) return t @property def lowerCamelCase__ ( self ): return self.sample is None def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = True ,): _lowercase : Optional[int] = self.index_for_timestep(UpperCAmelCase_ ) # advance index counter by 1 _lowercase : str = timestep.cpu().item() if torch.is_tensor(UpperCAmelCase_ ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: _lowercase : Any = self.sigmas[step_index] _lowercase : Any = self.sigmas_interpol[step_index + 1] _lowercase : Tuple = self.sigmas[step_index + 1] else: # 2nd order / KDPM2's method _lowercase : Union[str, Any] = self.sigmas[step_index - 1] _lowercase : int = self.sigmas_interpol[step_index] _lowercase : Tuple = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API _lowercase : Any = 0 _lowercase : int = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. 
compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": _lowercase : Union[str, Any] = sigma_hat if self.state_in_first_order else sigma_interpol _lowercase : Optional[Any] = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": _lowercase : str = sigma_hat if self.state_in_first_order else sigma_interpol _lowercase : List[str] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": raise NotImplementedError("""prediction_type not implemented yet: sample""" ) else: raise ValueError( f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order _lowercase : List[str] = (sample - pred_original_sample) / sigma_hat # 3. delta timestep _lowercase : Any = sigma_interpol - sigma_hat # store for 2nd order step _lowercase : List[Any] = sample else: # DPM-Solver-2 # 2. Convert to an ODE derivative for 2nd order _lowercase : Optional[Any] = (sample - pred_original_sample) / sigma_interpol # 3. delta timestep _lowercase : Optional[Any] = sigma_next - sigma_hat _lowercase : Any = self.sample _lowercase : Optional[int] = None _lowercase : str = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,): # Make sure sigmas and timesteps have the same device and dtype as original_samples _lowercase : int = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(UpperCAmelCase_ ): # mps does not support float64 _lowercase : str = self.timesteps.to(original_samples.device ,dtype=torch.floataa ) _lowercase : Any = timesteps.to(original_samples.device ,dtype=torch.floataa ) else: _lowercase : List[Any] = self.timesteps.to(original_samples.device ) _lowercase : Union[str, Any] = timesteps.to(original_samples.device ) _lowercase : List[Any] = [self.index_for_timestep(UpperCAmelCase_ ,UpperCAmelCase_ ) for t in timesteps] _lowercase : Optional[Any] = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): _lowercase : List[Any] = sigma.unsqueeze(-1 ) _lowercase : int = original_samples + noise * sigma return noisy_samples def __len__( self ): return self.config.num_train_timesteps
336
1
"""simple docstring""" import os # Precomputes a list of the 100 first triangular numbers UpperCAmelCase: List[Any] = [int(0.5 * n * (n + 1)) for n in range(1, 101)] def __SCREAMING_SNAKE_CASE ( ): _lowercase : str = os.path.dirname(os.path.realpath(__UpperCAmelCase ) ) _lowercase : Optional[int] = os.path.join(__UpperCAmelCase , """words.txt""" ) _lowercase : Tuple = """""" with open(__UpperCAmelCase ) as f: _lowercase : Union[str, Any] = f.readline() _lowercase : Any = [word.strip("""\"""" ) for word in words.strip("""\r\n""" ).split(""",""" )] _lowercase : Any = [ word for word in [sum(ord(__UpperCAmelCase ) - 64 for x in word ) for word in words] if word in TRIANGULAR_NUMBERS ] return len(__UpperCAmelCase ) if __name__ == "__main__": print(solution())
336
"""simple docstring""" import pprint import requests UpperCAmelCase: Tuple = """https://zenquotes.io/api""" def __SCREAMING_SNAKE_CASE ( ): return requests.get(API_ENDPOINT_URL + """/today""" ).json() def __SCREAMING_SNAKE_CASE ( ): return requests.get(API_ENDPOINT_URL + """/random""" ).json() if __name__ == "__main__": UpperCAmelCase: int = random_quotes() pprint.pprint(response)
336
1
"""simple docstring""" import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class UpperCamelCase : """simple docstring""" @property def lowerCamelCase__ ( self ): return self.get_dummy_input() @property def lowerCamelCase__ ( self ): if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(f"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" ) def lowerCamelCase__ ( self ,UpperCAmelCase_=True ,UpperCAmelCase_=False ,UpperCAmelCase_=False ,UpperCAmelCase_=False ,): _lowercase : Tuple = 4 _lowercase : str = 32 _lowercase : List[str] = (32, 32) _lowercase : str = torch.manual_seed(0 ) _lowercase : str = torch.device(UpperCAmelCase_ ) _lowercase : int = (batch_size, num_channels) + sizes _lowercase : Tuple = randn_tensor(UpperCAmelCase_ ,generator=UpperCAmelCase_ ,device=UpperCAmelCase_ ) _lowercase : Any = {"""hidden_states""": hidden_states} if include_temb: _lowercase : Optional[Any] = 1_28 _lowercase : List[Any] = randn_tensor((batch_size, temb_channels) ,generator=UpperCAmelCase_ ,device=UpperCAmelCase_ ) if include_res_hidden_states_tuple: _lowercase : Optional[int] = torch.manual_seed(1 ) _lowercase : Tuple = (randn_tensor(UpperCAmelCase_ ,generator=UpperCAmelCase_ ,device=UpperCAmelCase_ ),) if include_encoder_hidden_states: _lowercase : Tuple = floats_tensor((batch_size, 32, 32) ).to(UpperCAmelCase_ ) if include_skip_sample: _lowercase : Union[str, Any] = randn_tensor(((batch_size, 3) + sizes) ,generator=UpperCAmelCase_ ,device=UpperCAmelCase_ ) return dummy_input def lowerCamelCase__ ( self ): _lowercase : Union[str, Any] = { """in_channels""": 32, """out_channels""": 32, """temb_channels""": 1_28, } if self.block_type == "up": _lowercase : List[Any] = 32 if self.block_type == "mid": init_dict.pop("""out_channels""" ) _lowercase : str = self.dummy_input return init_dict, inputs_dict def lowerCamelCase__ ( self ,UpperCAmelCase_ ): _lowercase , _lowercase : Any = self.prepare_init_args_and_inputs_for_common() _lowercase : Dict = self.block_class(**UpperCAmelCase_ ) unet_block.to(UpperCAmelCase_ ) unet_block.eval() with torch.no_grad(): _lowercase : int = unet_block(**UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ): _lowercase : int = output[0] self.assertEqual(output.shape ,self.output_shape ) _lowercase : Optional[Any] = output[0, -1, -3:, -3:] _lowercase : List[Any] = torch.tensor(UpperCAmelCase_ ).to(UpperCAmelCase_ ) assert torch_all_close(output_slice.flatten() ,UpperCAmelCase_ ,atol=5E-3 ) @unittest.skipIf(torch_device == """mps""" ,"""Training is not supported in mps""" ) def lowerCamelCase__ ( self ): _lowercase , _lowercase : Dict = self.prepare_init_args_and_inputs_for_common() _lowercase : Optional[int] = self.block_class(**UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.train() _lowercase : Any = model(**UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ): _lowercase : Optional[Any] = output[0] _lowercase : Union[str, Any] = torch.device(UpperCAmelCase_ ) _lowercase : Any = randn_tensor(output.shape ,device=UpperCAmelCase_ ) _lowercase : Optional[Any] = torch.nn.functional.mse_loss(UpperCAmelCase_ ,UpperCAmelCase_ ) loss.backward()
336
"""simple docstring""" from __future__ import annotations from typing import TypedDict class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : str SCREAMING_SNAKE_CASE_ : int def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise TypeError("""The parameter s type must be str.""" ) return [s[i:] + s[:i] for i in range(len(__UpperCAmelCase ) )] def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise TypeError("""The parameter s type must be str.""" ) if not s: raise ValueError("""The parameter s must not be empty.""" ) _lowercase : Tuple = all_rotations(__UpperCAmelCase ) rotations.sort() # sort the list of rotations in alphabetically order # make a string composed of the last char of each rotation _lowercase : BWTTransformDict = { "bwt_string": "".join([word[-1] for word in rotations] ), "idx_original_string": rotations.index(__UpperCAmelCase ), } return response def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ): if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise TypeError("""The parameter bwt_string type must be str.""" ) if not bwt_string: raise ValueError("""The parameter bwt_string must not be empty.""" ) try: _lowercase : Optional[Any] = int(__UpperCAmelCase ) except ValueError: raise TypeError( """The parameter idx_original_string type must be int or passive""" """ of cast to int.""" ) if idx_original_string < 0: raise ValueError("""The parameter idx_original_string must not be lower than 0.""" ) if idx_original_string >= len(__UpperCAmelCase ): raise ValueError( """The parameter idx_original_string must be lower than""" """ len(bwt_string).""" ) _lowercase : int = [""""""] * len(__UpperCAmelCase ) for _ in range(len(__UpperCAmelCase ) ): for i in range(len(__UpperCAmelCase ) ): _lowercase : Union[str, Any] = bwt_string[i] + ordered_rotations[i] ordered_rotations.sort() return ordered_rotations[idx_original_string] if __name__ == "__main__": UpperCAmelCase: Optional[int] = """Provide a string that I will generate its BWT transform: """ UpperCAmelCase: int = input(entry_msg).strip() UpperCAmelCase: List[str] = bwt_transform(s) print( F'Burrows Wheeler transform for string \'{s}\' results ' F'in \'{result["bwt_string"]}\'' ) UpperCAmelCase: Union[str, Any] = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""]) print( F'Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' ' F'we get original string \'{original_string}\'' )
336
1
"""simple docstring""" from .configuration_bert_masked import MaskedBertConfig from .modeling_bert_masked import ( MaskedBertForMultipleChoice, MaskedBertForQuestionAnswering, MaskedBertForSequenceClassification, MaskedBertForTokenClassification, MaskedBertModel, ) from .modules import *
336
"""simple docstring""" from __future__ import annotations from itertools import permutations from random import randint from timeit import repeat def __SCREAMING_SNAKE_CASE ( ): _lowercase : Dict = [randint(-1000 , 1000 ) for i in range(10 )] _lowercase : Tuple = randint(-5000 , 5000 ) return (arr, r) UpperCAmelCase: int = make_dataset() def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ): for triplet in permutations(__UpperCAmelCase , 3 ): if sum(__UpperCAmelCase ) == target: return tuple(sorted(__UpperCAmelCase ) ) return (0, 0, 0) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ): arr.sort() _lowercase : Optional[Any] = len(__UpperCAmelCase ) for i in range(n - 1 ): _lowercase , _lowercase : str = i + 1, n - 1 while left < right: if arr[i] + arr[left] + arr[right] == target: return (arr[i], arr[left], arr[right]) elif arr[i] + arr[left] + arr[right] < target: left += 1 elif arr[i] + arr[left] + arr[right] > target: right -= 1 return (0, 0, 0) def __SCREAMING_SNAKE_CASE ( ): _lowercase : Tuple = """ from __main__ import dataset, triplet_sum1, triplet_sum2 """ _lowercase : Union[str, Any] = """ triplet_sum1(*dataset) """ _lowercase : Union[str, Any] = """ triplet_sum2(*dataset) """ _lowercase : Dict = repeat(setup=__UpperCAmelCase , stmt=__UpperCAmelCase , repeat=5 , number=10000 ) _lowercase : Any = repeat(setup=__UpperCAmelCase , stmt=__UpperCAmelCase , repeat=5 , number=10000 ) return (min(__UpperCAmelCase ), min(__UpperCAmelCase )) if __name__ == "__main__": from doctest import testmod testmod() UpperCAmelCase: Any = solution_times() print(F'The time for naive implementation is {times[0]}.') print(F'The time for optimized implementation is {times[1]}.')
336
1
"""simple docstring""" import shutil import tempfile import unittest from transformers import ( SPIECE_UNDERLINE, AddedToken, BatchEncoding, NllbTokenizer, NllbTokenizerFast, is_torch_available, ) from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin UpperCAmelCase: Any = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right UpperCAmelCase: List[Any] = 256_047 UpperCAmelCase: List[Any] = 256_145 @require_sentencepiece @require_tokenizers class UpperCamelCase ( snake_case , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = NllbTokenizer SCREAMING_SNAKE_CASE_ : Tuple = NllbTokenizerFast SCREAMING_SNAKE_CASE_ : List[str] = True SCREAMING_SNAKE_CASE_ : List[str] = True SCREAMING_SNAKE_CASE_ : List[str] = {} def lowerCamelCase__ ( self ): super().setUp() # We have a SentencePiece fixture for testing _lowercase : List[Any] = NllbTokenizer(UpperCAmelCase_ ,keep_accents=UpperCAmelCase_ ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase__ ( self ): _lowercase : Optional[Any] = NllbTokenizer(UpperCAmelCase_ ,keep_accents=UpperCAmelCase_ ) _lowercase : int = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(UpperCAmelCase_ ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) ,[value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] ,) _lowercase : List[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( UpperCAmelCase_ ,[ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] ,) _lowercase : Union[str, Any] = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) self.assertListEqual( UpperCAmelCase_ ,[ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] ,) _lowercase : Union[str, Any] = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ ) self.assertListEqual( UpperCAmelCase_ ,[ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] ,) def lowerCamelCase__ ( self ): _lowercase : List[str] = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-nllb""", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): _lowercase : str = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ ) _lowercase : Dict = self.tokenizer_class.from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ ) _lowercase : Tuple = tempfile.mkdtemp() _lowercase : int = tokenizer_r.save_pretrained(UpperCAmelCase_ ) _lowercase : Union[str, Any] = tokenizer_p.save_pretrained(UpperCAmelCase_ 
) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) _lowercase : Tuple = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f ) self.assertSequenceEqual(UpperCAmelCase_ ,UpperCAmelCase_ ) # Checks everything loads correctly in the same way _lowercase : Dict = tokenizer_r.from_pretrained(UpperCAmelCase_ ) _lowercase : Tuple = tokenizer_p.from_pretrained(UpperCAmelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCAmelCase_ ,UpperCAmelCase_ ) ) shutil.rmtree(UpperCAmelCase_ ) # Save tokenizer rust, legacy_format=True _lowercase : Dict = tempfile.mkdtemp() _lowercase : Any = tokenizer_r.save_pretrained(UpperCAmelCase_ ,legacy_format=UpperCAmelCase_ ) _lowercase : Dict = tokenizer_p.save_pretrained(UpperCAmelCase_ ) # Checks it save with the same files self.assertSequenceEqual(UpperCAmelCase_ ,UpperCAmelCase_ ) # Checks everything loads correctly in the same way _lowercase : Tuple = tokenizer_r.from_pretrained(UpperCAmelCase_ ) _lowercase : List[Any] = tokenizer_p.from_pretrained(UpperCAmelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCAmelCase_ ,UpperCAmelCase_ ) ) shutil.rmtree(UpperCAmelCase_ ) # Save tokenizer rust, legacy_format=False _lowercase : int = tempfile.mkdtemp() _lowercase : List[str] = tokenizer_r.save_pretrained(UpperCAmelCase_ ,legacy_format=UpperCAmelCase_ ) _lowercase : Union[str, Any] = tokenizer_p.save_pretrained(UpperCAmelCase_ ) # Checks it saved the tokenizer.json file self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way _lowercase : Dict = tokenizer_r.from_pretrained(UpperCAmelCase_ ) _lowercase : int = tokenizer_p.from_pretrained(UpperCAmelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCAmelCase_ ,UpperCAmelCase_ ) ) shutil.rmtree(UpperCAmelCase_ ) @require_torch def lowerCamelCase__ ( self ): if not self.test_seqaseq: return _lowercase : List[Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): # Longer text that will definitely require truncation. 
_lowercase : Dict = [ """ UN Chief Says There Is No Military Solution in Syria""", """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for""" """ Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons""" """ will only worsen the violence and misery for millions of people.""", ] _lowercase : Dict = [ """Şeful ONU declară că nu există o soluţie militară în Siria""", """Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al""" """ Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi""" """ că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""", ] try: _lowercase : Dict = tokenizer.prepare_seqaseq_batch( src_texts=UpperCAmelCase_ ,tgt_texts=UpperCAmelCase_ ,max_length=3 ,max_target_length=10 ,return_tensors="""pt""" ,src_lang="""eng_Latn""" ,tgt_lang="""ron_Latn""" ,) except NotImplementedError: return self.assertEqual(batch.input_ids.shape[1] ,3 ) self.assertEqual(batch.labels.shape[1] ,10 ) # max_target_length will default to max_length if not specified _lowercase : Optional[Any] = tokenizer.prepare_seqaseq_batch( UpperCAmelCase_ ,tgt_texts=UpperCAmelCase_ ,max_length=3 ,return_tensors="""pt""" ) self.assertEqual(batch.input_ids.shape[1] ,3 ) self.assertEqual(batch.labels.shape[1] ,3 ) _lowercase : List[str] = tokenizer.prepare_seqaseq_batch( src_texts=UpperCAmelCase_ ,max_length=3 ,max_target_length=10 ,return_tensors="""pt""" ) self.assertEqual(batch_encoder_only.input_ids.shape[1] ,3 ) self.assertEqual(batch_encoder_only.attention_mask.shape[1] ,3 ) self.assertNotIn("""decoder_input_ids""" ,UpperCAmelCase_ ) @unittest.skip("""Unfortunately way too slow to build a BPE with SentencePiece.""" ) def lowerCamelCase__ ( self ): pass def lowerCamelCase__ ( self ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): _lowercase : Any = [AddedToken("""<special>""" ,lstrip=UpperCAmelCase_ )] _lowercase : Dict = self.rust_tokenizer_class.from_pretrained( UpperCAmelCase_ ,additional_special_tokens=UpperCAmelCase_ ,**UpperCAmelCase_ ) _lowercase : Tuple = tokenizer_r.encode("""Hey this is a <special> token""" ) _lowercase : Any = tokenizer_r.encode("""<special>""" ,add_special_tokens=UpperCAmelCase_ )[0] self.assertTrue(special_token_id in r_output ) if self.test_slow_tokenizer: _lowercase : Optional[int] = self.rust_tokenizer_class.from_pretrained( UpperCAmelCase_ ,additional_special_tokens=UpperCAmelCase_ ,**UpperCAmelCase_ ,) _lowercase : Tuple = self.tokenizer_class.from_pretrained( UpperCAmelCase_ ,additional_special_tokens=UpperCAmelCase_ ,**UpperCAmelCase_ ) _lowercase : Dict = tokenizer_p.encode("""Hey this is a <special> token""" ) _lowercase : Optional[Any] = tokenizer_cr.encode("""Hey this is a <special> token""" ) self.assertEqual(UpperCAmelCase_ ,UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ ,UpperCAmelCase_ ) self.assertTrue(special_token_id in p_output ) self.assertTrue(special_token_id in cr_output ) @require_torch @require_sentencepiece @require_tokenizers class UpperCamelCase ( unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = "facebook/nllb-200-distilled-600M" SCREAMING_SNAKE_CASE_ : Union[str, Any] = [ " UN Chief Says There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military 
support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.", ] SCREAMING_SNAKE_CASE_ : int = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] SCREAMING_SNAKE_CASE_ : List[str] = [ 2_5_6_0_4_7, 1_6_2_9_7, 1_3_4_4_0_8, 8_1_6_5, 2_4_8_0_6_6, 1_4_7_3_4, 9_5_0, 1_1_3_5, 1_0_5_7_2_1, 3_5_7_3, 8_3, 2_7_3_5_2, 1_0_8, 4_9_4_8_6, 2, ] @classmethod def lowerCamelCase__ ( cls ): _lowercase : NllbTokenizer = NllbTokenizer.from_pretrained( cls.checkpoint_name ,src_lang="""eng_Latn""" ,tgt_lang="""ron_Latn""" ) _lowercase : Any = 1 return cls def lowerCamelCase__ ( self ): self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Arab"""] ,25_60_01 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Latn"""] ,25_60_02 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""fra_Latn"""] ,25_60_57 ) def lowerCamelCase__ ( self ): _lowercase : Tuple = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ): self.assertIn(UpperCAmelCase_ ,self.tokenizer.all_special_ids ) # fmt: off _lowercase : int = [RO_CODE, 42_54, 9_80_68, 11_29_23, 3_90_72, 39_09, 7_13, 10_27_67, 26, 1_73_14, 3_56_42, 1_46_83, 3_31_18, 20_22, 6_69_87, 2, 25_60_47] # fmt: on _lowercase : str = self.tokenizer.decode(UpperCAmelCase_ ,skip_special_tokens=UpperCAmelCase_ ) _lowercase : List[Any] = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ ,UpperCAmelCase_ ) self.assertNotIn(self.tokenizer.eos_token ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ): _lowercase : Tuple = ["""this is gunna be a long sentence """ * 20] assert isinstance(src_text[0] ,UpperCAmelCase_ ) _lowercase : List[str] = 10 _lowercase : int = self.tokenizer(UpperCAmelCase_ ,max_length=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ).input_ids[0] self.assertEqual(ids[-1] ,2 ) self.assertEqual(ids[0] ,UpperCAmelCase_ ) self.assertEqual(len(UpperCAmelCase_ ) ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ): self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) ,[25_62_03, 3] ) def lowerCamelCase__ ( self ): _lowercase : Dict = tempfile.mkdtemp() _lowercase : Tuple = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(UpperCAmelCase_ ) _lowercase : str = NllbTokenizer.from_pretrained(UpperCAmelCase_ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids ,UpperCAmelCase_ ) @require_torch def lowerCamelCase__ ( self ): _lowercase : Dict = self.tokenizer( self.src_text ,text_target=self.tgt_text ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,max_length=len(self.expected_src_tokens ) ,return_tensors="""pt""" ,) _lowercase : str = shift_tokens_right( batch["""labels"""] ,self.tokenizer.pad_token_id ,self.tokenizer.lang_code_to_id["""ron_Latn"""] ) self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ ) self.assertEqual((2, 15) ,batch.input_ids.shape ) self.assertEqual((2, 15) ,batch.attention_mask.shape ) _lowercase : int = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens ,UpperCAmelCase_ ) 
self.assertEqual(UpperCAmelCase_ ,batch.decoder_input_ids[0, 0] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens ,[EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id] ) def lowerCamelCase__ ( self ): _lowercase : Any = self.tokenizer(self.src_text ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,max_length=3 ,return_tensors="""pt""" ) _lowercase : Union[str, Any] = self.tokenizer( text_target=self.tgt_text ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,max_length=10 ,return_tensors="""pt""" ) _lowercase : List[str] = targets["""input_ids"""] _lowercase : int = shift_tokens_right( UpperCAmelCase_ ,self.tokenizer.pad_token_id ,decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] ,) self.assertEqual(batch.input_ids.shape[1] ,3 ) self.assertEqual(batch.decoder_input_ids.shape[1] ,10 ) @require_torch def lowerCamelCase__ ( self ): _lowercase : int = self.tokenizer._build_translation_inputs( """A test""" ,return_tensors="""pt""" ,src_lang="""eng_Latn""" ,tgt_lang="""fra_Latn""" ) self.assertEqual( nested_simplify(UpperCAmelCase_ ) ,{ # A, test, EOS, en_XX """input_ids""": [[25_60_47, 70, 73_56, 2]], """attention_mask""": [[1, 1, 1, 1]], # ar_AR """forced_bos_token_id""": 25_60_57, } ,) @require_torch def lowerCamelCase__ ( self ): _lowercase : List[Any] = True _lowercase : List[Any] = self.tokenizer( """UN Chief says there is no military solution in Syria""" ,src_lang="""eng_Latn""" ,tgt_lang="""fra_Latn""" ) self.assertEqual( inputs.input_ids ,[1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2, 25_60_47] ) _lowercase : Optional[int] = False _lowercase : str = self.tokenizer( """UN Chief says there is no military solution in Syria""" ,src_lang="""eng_Latn""" ,tgt_lang="""fra_Latn""" ) self.assertEqual( inputs.input_ids ,[25_60_47, 1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2] )
336
"""simple docstring""" import os from typing import List, Optional, Union from ...image_processing_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType from ..auto import AutoTokenizer class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = ["image_processor", "tokenizer"] SCREAMING_SNAKE_CASE_ : Optional[int] = "BlipImageProcessor" SCREAMING_SNAKE_CASE_ : Tuple = "AutoTokenizer" def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ): super().__init__(UpperCAmelCase_ ,UpperCAmelCase_ ) # add QFormer tokenizer _lowercase : Optional[int] = qformer_tokenizer def __call__( self ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = True ,UpperCAmelCase_ = False ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = 0 ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,): if images is None and text is None: raise ValueError("""You have to specify at least images or text.""" ) _lowercase : List[Any] = BatchFeature() if text is not None: _lowercase : List[str] = self.tokenizer( text=UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,max_length=UpperCAmelCase_ ,stride=UpperCAmelCase_ ,pad_to_multiple_of=UpperCAmelCase_ ,return_attention_mask=UpperCAmelCase_ ,return_overflowing_tokens=UpperCAmelCase_ ,return_special_tokens_mask=UpperCAmelCase_ ,return_offsets_mapping=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ ,return_length=UpperCAmelCase_ ,verbose=UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ,**UpperCAmelCase_ ,) encoding.update(UpperCAmelCase_ ) _lowercase : Dict = self.qformer_tokenizer( text=UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,max_length=UpperCAmelCase_ ,stride=UpperCAmelCase_ ,pad_to_multiple_of=UpperCAmelCase_ ,return_attention_mask=UpperCAmelCase_ ,return_overflowing_tokens=UpperCAmelCase_ ,return_special_tokens_mask=UpperCAmelCase_ ,return_offsets_mapping=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ ,return_length=UpperCAmelCase_ ,verbose=UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ,**UpperCAmelCase_ ,) _lowercase : str = qformer_text_encoding.pop("""input_ids""" ) _lowercase : int = qformer_text_encoding.pop("""attention_mask""" ) if images is not None: _lowercase : Optional[Any] = self.image_processor(UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ) encoding.update(UpperCAmelCase_ ) return encoding def lowerCamelCase__ ( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): return self.tokenizer.batch_decode(*UpperCAmelCase_ ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): return self.tokenizer.decode(*UpperCAmelCase_ ,**UpperCAmelCase_ ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def lowerCamelCase__ ( self ): _lowercase : Union[str, Any] = self.tokenizer.model_input_names _lowercase : int = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,**UpperCAmelCase_ ): if 
os.path.isfile(UpperCAmelCase_ ): raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" ) os.makedirs(UpperCAmelCase_ ,exist_ok=UpperCAmelCase_ ) _lowercase : Any = os.path.join(UpperCAmelCase_ ,"""qformer_tokenizer""" ) self.qformer_tokenizer.save_pretrained(UpperCAmelCase_ ) return super().save_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ ) @classmethod def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ): _lowercase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCAmelCase_ ,subfolder="""qformer_tokenizer""" ) _lowercase : Any = cls._get_arguments_from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ ) args.append(UpperCAmelCase_ ) return cls(*UpperCAmelCase_ )
336
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) UpperCAmelCase: Any = { """configuration_encodec""": [ """ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP""", """EncodecConfig""", ], """feature_extraction_encodec""": ["""EncodecFeatureExtractor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase: int = [ """ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST""", """EncodecModel""", """EncodecPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys UpperCAmelCase: Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
336
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging UpperCAmelCase: Tuple = logging.get_logger(__name__) UpperCAmelCase: List[Any] = { """CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": ( """https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json""" ), # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer } class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = "trajectory_transformer" SCREAMING_SNAKE_CASE_ : Optional[int] = ["past_key_values"] SCREAMING_SNAKE_CASE_ : Tuple = { "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self ,UpperCAmelCase_=1_00 ,UpperCAmelCase_=5 ,UpperCAmelCase_=1 ,UpperCAmelCase_=1 ,UpperCAmelCase_=2_49 ,UpperCAmelCase_=6 ,UpperCAmelCase_=17 ,UpperCAmelCase_=25 ,UpperCAmelCase_=4 ,UpperCAmelCase_=4 ,UpperCAmelCase_=1_28 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.0006 ,UpperCAmelCase_=5_12 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=1E-12 ,UpperCAmelCase_=1 ,UpperCAmelCase_=True ,UpperCAmelCase_=1 ,UpperCAmelCase_=5_02_56 ,UpperCAmelCase_=5_02_56 ,**UpperCAmelCase_ ,): _lowercase : Dict = vocab_size _lowercase : List[str] = action_weight _lowercase : int = reward_weight _lowercase : List[Any] = value_weight _lowercase : List[str] = max_position_embeddings _lowercase : Any = block_size _lowercase : Any = action_dim _lowercase : List[str] = observation_dim _lowercase : Union[str, Any] = transition_dim _lowercase : str = learning_rate _lowercase : Tuple = n_layer _lowercase : Optional[int] = n_head _lowercase : List[str] = n_embd _lowercase : List[str] = embd_pdrop _lowercase : Optional[Any] = attn_pdrop _lowercase : List[Any] = resid_pdrop _lowercase : str = initializer_range _lowercase : Optional[Any] = layer_norm_eps _lowercase : List[Any] = kaiming_initializer_range _lowercase : List[Any] = use_cache super().__init__(pad_token_id=UpperCAmelCase_ ,bos_token_id=UpperCAmelCase_ ,eos_token_id=UpperCAmelCase_ ,**UpperCAmelCase_ )
336
1
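A config class like the one above is essentially "constructor kwargs in, dict out, and back again". A hedged stand-in showing that round trip with a plain dataclass (hypothetical names only, none of the real `PretrainedConfig` machinery):

from dataclasses import asdict, dataclass


@dataclass
class ToyTrajectoryConfig:  # hypothetical stand-in, not the real class
    vocab_size: int = 100
    n_layer: int = 4
    n_head: int = 4
    n_embd: int = 128


cfg = ToyTrajectoryConfig(n_layer=6)
blob = asdict(cfg)                  # analogous to config.to_dict()
same = ToyTrajectoryConfig(**blob)  # analogous to Config.from_dict(...)
assert same == cfg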
"""simple docstring""" import csv import tweepy # Twitter API credentials UpperCAmelCase: List[str] = """""" UpperCAmelCase: Optional[int] = """""" UpperCAmelCase: int = """""" UpperCAmelCase: Union[str, Any] = """""" def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): # authorize twitter, initialize tweepy _lowercase : Dict = tweepy.OAuthHandler(__UpperCAmelCase , __UpperCAmelCase ) auth.set_access_token(__UpperCAmelCase , __UpperCAmelCase ) _lowercase : Union[str, Any] = tweepy.API(__UpperCAmelCase ) # initialize a list to hold all the tweepy Tweets _lowercase : Union[str, Any] = [] # make initial request for most recent tweets (200 is the maximum allowed count) _lowercase : Union[str, Any] = api.user_timeline(screen_name=__UpperCAmelCase , count=200 ) # save most recent tweets alltweets.extend(__UpperCAmelCase ) # save the id of the oldest tweet less one _lowercase : Tuple = alltweets[-1].id - 1 # keep grabbing tweets until there are no tweets left to grab while len(__UpperCAmelCase ) > 0: print(F"""getting tweets before {oldest}""" ) # all subsequent requests use the max_id param to prevent duplicates _lowercase : Any = api.user_timeline( screen_name=__UpperCAmelCase , count=200 , max_id=__UpperCAmelCase ) # save most recent tweets alltweets.extend(__UpperCAmelCase ) # update the id of the oldest tweet less one _lowercase : List[str] = alltweets[-1].id - 1 print(F"""...{len(__UpperCAmelCase )} tweets downloaded so far""" ) # transform the tweepy tweets into a 2D array that will populate the csv _lowercase : str = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets] # write the csv with open(F"""new_{screen_name}_tweets.csv""" , """w""" ) as f: _lowercase : Any = csv.writer(__UpperCAmelCase ) writer.writerow(["""id""", """created_at""", """text"""] ) writer.writerows(__UpperCAmelCase ) if __name__ == "__main__": # pass in the username of the account you want to download get_all_tweets("""FirePing32""")
336
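The download loop above pages backwards through a timeline by passing `max_id = oldest_id - 1` on each request. The same cursor pattern against a fake in-memory timeline, so it runs without Twitter credentials; `fetch_page` is a hypothetical stand-in for `api.user_timeline`.

def fetch_page(timeline, count, max_id=None):
    # Return up to `count` items with id <= max_id, newest first.
    page = [t for t in timeline if max_id is None or t["id"] <= max_id]
    return page[:count]


timeline = [{"id": i, "text": f"tweet {i}"} for i in range(999, 0, -1)]
all_items, oldest = [], None
while True:
    page = fetch_page(timeline, count=200, max_id=oldest)
    if not page:
        break
    all_items.extend(page)
    oldest = page[-1]["id"] - 1  # same "oldest id minus one" cursor as above

assert len(all_items) == 999 and all_items[-1]["id"] == 1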
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING UpperCAmelCase: Any = logging.get_logger(__name__) UpperCAmelCase: List[str] = { """Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""", } class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = "instructblip_vision_model" def __init__( self ,UpperCAmelCase_=14_08 ,UpperCAmelCase_=61_44 ,UpperCAmelCase_=39 ,UpperCAmelCase_=16 ,UpperCAmelCase_=2_24 ,UpperCAmelCase_=14 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=1E-6 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=1E-10 ,UpperCAmelCase_=True ,**UpperCAmelCase_ ,): super().__init__(**UpperCAmelCase_ ) _lowercase : Optional[Any] = hidden_size _lowercase : Tuple = intermediate_size _lowercase : List[Any] = num_hidden_layers _lowercase : Tuple = num_attention_heads _lowercase : Optional[Any] = patch_size _lowercase : Optional[Any] = image_size _lowercase : Union[str, Any] = initializer_range _lowercase : Optional[Any] = attention_dropout _lowercase : List[Any] = layer_norm_eps _lowercase : Optional[int] = hidden_act _lowercase : Tuple = qkv_bias @classmethod def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ): cls._set_token_in_kwargs(UpperCAmelCase_ ) _lowercase , _lowercase : List[Any] = cls.get_config_dict(UpperCAmelCase_ ,**UpperCAmelCase_ ) # get the vision config dict if we are loading from InstructBlipConfig if config_dict.get("""model_type""" ) == "instructblip": _lowercase : int = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(UpperCAmelCase_ ,**UpperCAmelCase_ ) class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = "instructblip_qformer" def __init__( self ,UpperCAmelCase_=3_05_22 ,UpperCAmelCase_=7_68 ,UpperCAmelCase_=12 ,UpperCAmelCase_=12 ,UpperCAmelCase_=30_72 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=5_12 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=1E-12 ,UpperCAmelCase_=0 ,UpperCAmelCase_="absolute" ,UpperCAmelCase_=2 ,UpperCAmelCase_=14_08 ,**UpperCAmelCase_ ,): super().__init__(pad_token_id=UpperCAmelCase_ ,**UpperCAmelCase_ ) _lowercase : List[Any] = vocab_size _lowercase : List[Any] = hidden_size _lowercase : str = num_hidden_layers _lowercase : List[str] = num_attention_heads _lowercase : Optional[Any] = hidden_act _lowercase : int = intermediate_size _lowercase : Union[str, Any] = hidden_dropout_prob _lowercase : Optional[Any] = attention_probs_dropout_prob _lowercase : List[Any] = max_position_embeddings _lowercase : Tuple = initializer_range _lowercase : Optional[int] = layer_norm_eps _lowercase : Any = position_embedding_type _lowercase : Dict = cross_attention_frequency _lowercase : Optional[Any] = encoder_hidden_size @classmethod def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ): cls._set_token_in_kwargs(UpperCAmelCase_ ) _lowercase , _lowercase : Dict = cls.get_config_dict(UpperCAmelCase_ ,**UpperCAmelCase_ ) # get the qformer config dict if we are loading from InstructBlipConfig if config_dict.get("""model_type""" ) == "instructblip": _lowercase : str = config_dict["""qformer_config"""] if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(UpperCAmelCase_ ,**UpperCAmelCase_ ) class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = "instructblip" SCREAMING_SNAKE_CASE_ : List[str] = True def __init__( self ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=32 ,**UpperCAmelCase_ ): super().__init__(**UpperCAmelCase_ ) if vision_config is None: _lowercase : str = {} logger.info("""vision_config is None. initializing the InstructBlipVisionConfig with default values.""" ) if qformer_config is None: _lowercase : Any = {} logger.info("""qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.""" ) if text_config is None: _lowercase : Optional[int] = {} logger.info("""text_config is None. 
Initializing the text config with default values (`OPTConfig`).""" ) _lowercase : int = InstructBlipVisionConfig(**UpperCAmelCase_ ) _lowercase : Optional[int] = InstructBlipQFormerConfig(**UpperCAmelCase_ ) _lowercase : Dict = text_config["""model_type"""] if """model_type""" in text_config else """opt""" _lowercase : str = CONFIG_MAPPING[text_model_type](**UpperCAmelCase_ ) _lowercase : str = self.text_config.tie_word_embeddings _lowercase : Union[str, Any] = self.text_config.is_encoder_decoder _lowercase : List[str] = num_query_tokens _lowercase : List[str] = self.vision_config.hidden_size _lowercase : Dict = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES _lowercase : Union[str, Any] = 1.0 _lowercase : Dict = 0.02 @classmethod def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,**UpperCAmelCase_ ,): return cls( vision_config=vision_config.to_dict() ,qformer_config=qformer_config.to_dict() ,text_config=text_config.to_dict() ,**UpperCAmelCase_ ,) def lowerCamelCase__ ( self ): _lowercase : Union[str, Any] = copy.deepcopy(self.__dict__ ) _lowercase : int = self.vision_config.to_dict() _lowercase : Any = self.qformer_config.to_dict() _lowercase : Any = self.text_config.to_dict() _lowercase : Optional[int] = self.__class__.model_type return output
336
1
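The composite config above nests sub-configs under fixed keys and rebuilds them from dicts, filling in defaults when a piece is missing. A toy sketch of that pattern (hypothetical classes; the default values merely echo the ones above):

class ToySubConfig:
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    def to_dict(self):
        return dict(self.__dict__)


class ToyComposite:
    def __init__(self, vision_config=None, qformer_config=None):
        # Missing pieces fall back to defaults, as in the __init__ above.
        self.vision_config = ToySubConfig(**(vision_config or {"hidden_size": 1408}))
        self.qformer_config = ToySubConfig(**(qformer_config or {"hidden_size": 768}))

    def to_dict(self):
        return {
            "vision_config": self.vision_config.to_dict(),
            "qformer_config": self.qformer_config.to_dict(),
        }


cfg = ToyComposite(vision_config={"hidden_size": 512})
rebuilt = ToyComposite(**cfg.to_dict())
assert rebuilt.vision_config.hidden_size == 512
assert rebuilt.qformer_config.hidden_size == 768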
"""simple docstring""" import copy from typing import Dict, List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING UpperCAmelCase: Tuple = { """facebook/mask2former-swin-small-coco-instance""": ( """https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json""" ) # See all Mask2Former models at https://huggingface.co/models?filter=mask2former } UpperCAmelCase: Optional[int] = logging.get_logger(__name__) class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = "mask2former" SCREAMING_SNAKE_CASE_ : Tuple = ["swin"] SCREAMING_SNAKE_CASE_ : str = {"hidden_size": "hidden_dim"} def __init__( self ,UpperCAmelCase_ = None ,UpperCAmelCase_ = 2_56 ,UpperCAmelCase_ = 2_56 ,UpperCAmelCase_ = 2_56 ,UpperCAmelCase_ = 10_24 ,UpperCAmelCase_ = "relu" ,UpperCAmelCase_ = 6 ,UpperCAmelCase_ = 10 ,UpperCAmelCase_ = 8 ,UpperCAmelCase_ = 0.0 ,UpperCAmelCase_ = 20_48 ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = 4 ,UpperCAmelCase_ = 2_55 ,UpperCAmelCase_ = 1_00 ,UpperCAmelCase_ = 0.1 ,UpperCAmelCase_ = 2.0 ,UpperCAmelCase_ = 5.0 ,UpperCAmelCase_ = 5.0 ,UpperCAmelCase_ = 1_25_44 ,UpperCAmelCase_ = 3.0 ,UpperCAmelCase_ = 0.75 ,UpperCAmelCase_ = 0.02 ,UpperCAmelCase_ = 1.0 ,UpperCAmelCase_ = True ,UpperCAmelCase_ = [4, 8, 16, 32] ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,): if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.""" ) _lowercase : List[str] = CONFIG_MAPPING["""swin"""]( image_size=2_24 ,in_channels=3 ,patch_size=4 ,embed_dim=96 ,depths=[2, 2, 18, 2] ,num_heads=[3, 6, 12, 24] ,window_size=7 ,drop_path_rate=0.3 ,use_absolute_embeddings=UpperCAmelCase_ ,out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ,) if isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ): _lowercase : Optional[int] = backbone_config.pop("""model_type""" ) _lowercase : Any = CONFIG_MAPPING[backbone_model_type] _lowercase : Any = config_class.from_dict(UpperCAmelCase_ ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. 
""" f"""Supported model types: {",".join(self.backbones_supported )}""" ) _lowercase : int = backbone_config _lowercase : str = feature_size _lowercase : Union[str, Any] = mask_feature_size _lowercase : Any = hidden_dim _lowercase : Optional[Any] = encoder_feedforward_dim _lowercase : Union[str, Any] = activation_function _lowercase : List[Any] = encoder_layers _lowercase : str = decoder_layers _lowercase : Any = num_attention_heads _lowercase : Any = dropout _lowercase : Optional[int] = dim_feedforward _lowercase : Union[str, Any] = pre_norm _lowercase : Optional[int] = enforce_input_projection _lowercase : Tuple = common_stride _lowercase : List[Any] = ignore_value _lowercase : List[Any] = num_queries _lowercase : Dict = no_object_weight _lowercase : List[str] = class_weight _lowercase : int = mask_weight _lowercase : Any = dice_weight _lowercase : str = train_num_points _lowercase : str = oversample_ratio _lowercase : Tuple = importance_sample_ratio _lowercase : List[Any] = init_std _lowercase : Optional[Any] = init_xavier_std _lowercase : Any = use_auxiliary_loss _lowercase : List[str] = feature_strides _lowercase : Any = output_auxiliary_logits _lowercase : Any = decoder_layers super().__init__(**UpperCAmelCase_ ) @classmethod def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ): return cls( backbone_config=UpperCAmelCase_ ,**UpperCAmelCase_ ,) def lowerCamelCase__ ( self ): _lowercase : Dict = copy.deepcopy(self.__dict__ ) _lowercase : List[str] = self.backbone_config.to_dict() _lowercase : Any = self.__class__.model_type return output
336
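Backbone selection above is a registry dispatch: pop `model_type` from the dict, look up the config class, rebuild from what remains. A toy version with a stand-in registry in place of transformers' `CONFIG_MAPPING`:

class SwinLikeConfig:  # hypothetical stand-in
    model_type = "swin"

    @classmethod
    def from_dict(cls, d):
        obj = cls()
        obj.__dict__.update(d)
        return obj


TOY_CONFIG_MAPPING = {"swin": SwinLikeConfig}

raw = {"model_type": "swin", "embed_dim": 96}
model_type = raw.pop("model_type")  # same pop as in the __init__ above
backbone = TOY_CONFIG_MAPPING[model_type].from_dict(raw)
assert backbone.embed_dim == 96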
"""simple docstring""" import cva import numpy as np class UpperCamelCase : """simple docstring""" def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ): if k in (0.04, 0.06): _lowercase : Optional[Any] = k _lowercase : Optional[Any] = window_size else: raise ValueError("""invalid k value""" ) def __str__( self ): return str(self.k ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ): _lowercase : List[str] = cva.imread(UpperCAmelCase_ ,0 ) _lowercase , _lowercase : Dict = img.shape _lowercase : list[list[int]] = [] _lowercase : int = img.copy() _lowercase : List[str] = cva.cvtColor(UpperCAmelCase_ ,cva.COLOR_GRAY2RGB ) _lowercase , _lowercase : Optional[Any] = np.gradient(UpperCAmelCase_ ) _lowercase : Optional[int] = dx**2 _lowercase : Optional[Any] = dy**2 _lowercase : Optional[Any] = dx * dy _lowercase : List[str] = 0.04 _lowercase : Optional[Any] = self.window_size // 2 for y in range(UpperCAmelCase_ ,h - offset ): for x in range(UpperCAmelCase_ ,w - offset ): _lowercase : Optional[Any] = ixx[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() _lowercase : Dict = iyy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() _lowercase : Union[str, Any] = ixy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() _lowercase : int = (wxx * wyy) - (wxy**2) _lowercase : Union[str, Any] = wxx + wyy _lowercase : Union[str, Any] = det - k * (trace**2) # Can change the value if r > 0.5: corner_list.append([x, y, r] ) color_img.itemset((y, x, 0) ,0 ) color_img.itemset((y, x, 1) ,0 ) color_img.itemset((y, x, 2) ,2_55 ) return color_img, corner_list if __name__ == "__main__": UpperCAmelCase: Optional[int] = HarrisCorner(0.04, 3) UpperCAmelCase , UpperCAmelCase: List[Any] = edge_detect.detect("""path_to_image""") cva.imwrite("""detect.png""", color_img)
336
1
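The detector's core formula is R = det(M) - k * trace(M)^2, where M is built from windowed sums of Ix^2, Iy^2, and Ix*Iy. A mini numpy example computing one response value. (Note: the original implementation appears to hardcode k = 0.04 inside detect(), regardless of the k passed to the constructor.)

import numpy as np

img = np.zeros((8, 8), dtype=float)
img[4:, 4:] = 255.0  # a single synthetic corner at (4, 4)

dy, dx = np.gradient(img)
ixx, iyy, ixy = dx * dx, dy * dy, dx * dy

k, offset = 0.04, 1  # 3x3 window, matching window_size = 3
y = x = 4
wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()

r = (wxx * wyy - wxy**2) - k * (wxx + wyy) ** 2  # det(M) - k * trace(M)^2
print(f"Harris response at the corner: {r:.1f}")  # large |r| flags a feature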
"""simple docstring""" import argparse import dataclasses import json import logging import os import shutil from typing import List, Optional import datasets from accelerate import Accelerator from datasets import load_dataset from finetuning import finetune from tqdm.auto import tqdm import transformers from transformers import AutoConfig, set_seed from transformers.trainer_utils import IntervalStrategy UpperCAmelCase: Union[str, Any] = logging.getLogger(__name__) UpperCAmelCase: List[Any] = """pytorch_model.bin""" @dataclasses.dataclass class UpperCamelCase : """simple docstring""" SCREAMING_SNAKE_CASE_ : str = dataclasses.field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} ) SCREAMING_SNAKE_CASE_ : Optional[str] = dataclasses.field( default=snake_case , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."} , ) @dataclasses.dataclass class UpperCamelCase : """simple docstring""" SCREAMING_SNAKE_CASE_ : str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."} ) SCREAMING_SNAKE_CASE_ : str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."} ) SCREAMING_SNAKE_CASE_ : Optional[str] = dataclasses.field( default=snake_case , metadata={"help": "A csv or a json file containing the validation data."} ) SCREAMING_SNAKE_CASE_ : Optional[str] = dataclasses.field( default=snake_case , metadata={"help": "The name of the task to train on."} , ) SCREAMING_SNAKE_CASE_ : Optional[List[str]] = dataclasses.field( default=snake_case , metadata={"help": "The list of labels for the task."} ) @dataclasses.dataclass class UpperCamelCase : """simple docstring""" SCREAMING_SNAKE_CASE_ : str = dataclasses.field( metadata={"help": "The output directory where the model predictions and checkpoints will be written."} ) SCREAMING_SNAKE_CASE_ : Optional[str] = dataclasses.field( default="accuracy" , metadata={"help": "The evaluation metric used for the task."} ) SCREAMING_SNAKE_CASE_ : Optional[str] = dataclasses.field( default="no" , metadata={ "help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]" } , ) SCREAMING_SNAKE_CASE_ : Optional[int] = dataclasses.field( default=1_0 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , ) SCREAMING_SNAKE_CASE_ : Optional[float] = dataclasses.field( default=0.0 , metadata={ "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions." 
} , ) SCREAMING_SNAKE_CASE_ : Optional[bool] = dataclasses.field( default=snake_case , metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."} , ) SCREAMING_SNAKE_CASE_ : Optional[bool] = dataclasses.field( default=snake_case , metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."} , ) SCREAMING_SNAKE_CASE_ : Optional[bool] = dataclasses.field( default=snake_case , metadata={"help": "Whether to fine-tune on labeled data after pseudo training."} , ) SCREAMING_SNAKE_CASE_ : Optional[float] = dataclasses.field( default=0.0 , metadata={"help": "Confidence threshold for pseudo-labeled data filtering."} , ) SCREAMING_SNAKE_CASE_ : Optional[int] = dataclasses.field( default=1_0_0 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , ) SCREAMING_SNAKE_CASE_ : Optional[int] = dataclasses.field( default=snake_case , metadata={"help": "Random seed for initialization."} , ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): _lowercase : Any = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 ) if args.do_filter_by_confidence: _lowercase : Optional[Any] = dataset.filter(lambda __UpperCAmelCase : example["probability"] > args.confidence_threshold ) if args.do_filter_by_val_performance: assert eval_result >= 0.0 and eval_result <= 1.0 _lowercase : int = int(eval_result * len(__UpperCAmelCase ) ) print(__UpperCAmelCase ) _lowercase : Optional[Any] = dataset.sort("""probability""" , reverse=__UpperCAmelCase ) _lowercase : Dict = dataset.select(range(__UpperCAmelCase ) ) _lowercase : Any = dataset.remove_columns(["""label""", """probability"""] ) _lowercase : List[str] = dataset.rename_column("""prediction""" , """label""" ) _lowercase : List[str] = dataset.map(lambda __UpperCAmelCase : {"label": idalabel[example["label"]]} ) _lowercase : Optional[int] = dataset.shuffle(seed=args.seed ) _lowercase : List[Any] = os.path.join(__UpperCAmelCase , F"""train_pseudo.{args.data_file_extension}""" ) if args.data_file_extension == "csv": dataset.to_csv(__UpperCAmelCase , index=__UpperCAmelCase ) else: dataset.to_json(__UpperCAmelCase ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ): _lowercase : Any = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , ) logger.info(accelerator.state ) # Setup logging, we only want one process per machine to log things on the # screen. accelerator.is_local_main_process is only True for one process per # machine. 
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() _lowercase : Union[str, Any] = STModelArguments(model_name_or_path=__UpperCAmelCase ) _lowercase : int = STDataArguments(train_file=__UpperCAmelCase , infer_file=__UpperCAmelCase ) _lowercase : Any = STTrainingArguments(output_dir=__UpperCAmelCase ) _lowercase : int = argparse.Namespace() for arg_class in (model_args, data_args, training_args): for key, value in vars(__UpperCAmelCase ).items(): setattr(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) for key, value in kwargs.items(): if hasattr(__UpperCAmelCase , __UpperCAmelCase ): setattr(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # Sanity checks _lowercase : Any = {} _lowercase : Optional[Any] = None # You need to provide the training data and the data to predict on assert args.train_file is not None assert args.infer_file is not None _lowercase : Optional[Any] = args.train_file _lowercase : List[str] = args.infer_file if args.evaluation_strategy != IntervalStrategy.NO.value: assert args.eval_file is not None _lowercase : Dict = args.eval_file for key in data_files: _lowercase : Tuple = data_files[key].split(""".""" )[-1] assert extension in ["csv", "json"], F"""`{key}_file` should be a csv or a json file.""" if args.data_file_extension is None: _lowercase : int = extension else: assert extension == args.data_file_extension, F"""`{key}_file` should be a {args.data_file_extension} file`.""" assert ( args.eval_metric in datasets.list_metrics() ), F"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.""" # If passed along, set the training seed now. 
if args.seed is not None: set_seed(args.seed ) logger.info("""Creating the initial data directory for self-training...""" ) _lowercase : Any = F"""{args.output_dir}/self-train_iter-{{}}""".format _lowercase : List[str] = data_dir_format(0 ) if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir , exist_ok=__UpperCAmelCase ) os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase ) accelerator.wait_for_everyone() _lowercase : Optional[int] = None _lowercase : List[Any] = None _lowercase : Dict = 0 _lowercase : Union[str, Any] = False # Show the progress bar _lowercase : Tuple = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process ) # Self-train for iteration in range(0 , int(args.max_selftrain_iterations ) ): _lowercase : int = data_dir_format(__UpperCAmelCase ) assert os.path.exists(__UpperCAmelCase ) # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for # iteration > 0 _lowercase : Any = os.path.join(__UpperCAmelCase , """stage-1""" ) _lowercase : Any = { """accelerator""": accelerator, """model_name_or_path""": args.model_name_or_path, """cache_dir""": args.cache_dir, """do_train""": True, """train_file""": data_files["""train"""] if iteration == 0 else data_files["""train_pseudo"""], """do_eval""": True if args.eval_file is not None else False, """eval_file""": data_files["""eval"""], """do_predict""": True, """infer_file""": data_files["""infer"""], """task_name""": args.task_name, """label_list""": args.label_list, """output_dir""": current_output_dir, """eval_metric""": args.eval_metric, """evaluation_strategy""": args.evaluation_strategy, """early_stopping_patience""": args.early_stopping_patience, """early_stopping_threshold""": args.early_stopping_threshold, """seed""": args.seed, } # Add additional training arguments for key, value in kwargs.items(): if key not in arguments_dict and not hasattr(__UpperCAmelCase , __UpperCAmelCase ): arguments_dict.update({key: value} ) _lowercase : Optional[Any] = os.path.join(__UpperCAmelCase , """best-checkpoint""" , __UpperCAmelCase ) if os.path.exists(__UpperCAmelCase ): logger.info( """Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.""" , __UpperCAmelCase , __UpperCAmelCase , ) else: logger.info("""***** Running self-training: iteration: %d, stage: 1 *****""" , __UpperCAmelCase ) finetune(**__UpperCAmelCase ) accelerator.wait_for_everyone() assert os.path.exists(__UpperCAmelCase ) logger.info("""Self-training job completed: iteration: %d, stage: 1.""" , __UpperCAmelCase ) if iteration > 0 and args.finetune_on_labeled_data: # Stage 2 (optional): fine-tuning on the original labeled data _lowercase : List[str] = os.path.join(__UpperCAmelCase , """best-checkpoint""" ) _lowercase : Optional[Any] = os.path.join(__UpperCAmelCase , """stage-2""" ) # Update arguments_dict _lowercase : Union[str, Any] = model_path _lowercase : Optional[Any] = data_files["""train"""] _lowercase : str = current_output_dir _lowercase : int = os.path.join(__UpperCAmelCase , """best-checkpoint""" , __UpperCAmelCase ) if os.path.exists(__UpperCAmelCase ): logger.info( """Found existing model checkpoint at %s. 
Skipping self-training: iteration: %d, stage: 2.""" , __UpperCAmelCase , __UpperCAmelCase , ) else: logger.info("""***** Running self-training: iteration: %d, stage: 2 *****""" , __UpperCAmelCase ) finetune(**__UpperCAmelCase ) accelerator.wait_for_everyone() assert os.path.exists(__UpperCAmelCase ) logger.info("""Self-training job completed: iteration: %d, stage: 2.""" , __UpperCAmelCase ) _lowercase : List[str] = iteration _lowercase : Optional[Any] = data_dir_format(iteration + 1 ) _lowercase : Any = AutoConfig.from_pretrained(os.path.join(__UpperCAmelCase , """best-checkpoint""" ) ) _lowercase : int = config.idalabel _lowercase : Optional[Any] = os.path.join(__UpperCAmelCase , """eval_results_best-checkpoint.json""" ) _lowercase : Tuple = os.path.join(__UpperCAmelCase , """test_results_best-checkpoint.json""" ) assert os.path.exists(__UpperCAmelCase ) with open(__UpperCAmelCase , """r""" ) as f: _lowercase : Tuple = float(json.load(__UpperCAmelCase )[args.eval_metric] ) _lowercase : str = os.path.join(__UpperCAmelCase , """infer_output_best-checkpoint.csv""" ) assert os.path.exists(__UpperCAmelCase ) # Loading the dataset from local csv or json files. _lowercase : str = load_dataset(args.data_file_extension , data_files={"""data""": data_files["""infer"""]} )["""data"""] _lowercase : Optional[Any] = load_dataset("""csv""" , data_files={"""data""": infer_output_file} )["""data"""] if accelerator.is_main_process: os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase ) shutil.copy(__UpperCAmelCase , os.path.join(__UpperCAmelCase , F"""eval_results_iter-{iteration}.json""" ) ) if os.path.exists(__UpperCAmelCase ): shutil.copy(__UpperCAmelCase , os.path.join(__UpperCAmelCase , F"""test_results_iter-{iteration}.json""" ) ) create_pseudo_labeled_data(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) accelerator.wait_for_everyone() _lowercase : Optional[int] = os.path.join(__UpperCAmelCase , F"""train_pseudo.{args.data_file_extension}""" ) if args.evaluation_strategy != IntervalStrategy.NO.value: _lowercase : Union[str, Any] = eval_result if best_iteration is None: _lowercase : str = new_iteration _lowercase : Any = new_eval_result else: if new_eval_result - best_eval_result > args.early_stopping_threshold: _lowercase : str = new_iteration _lowercase : Union[str, Any] = new_eval_result _lowercase : Optional[Any] = 0 else: if new_eval_result == best_eval_result: _lowercase : Optional[int] = new_iteration _lowercase : Dict = new_eval_result early_stopping_patience_counter += 1 if early_stopping_patience_counter >= args.early_stopping_patience: _lowercase : List[Any] = True progress_bar.update(1 ) if should_training_stop: break if best_iteration is not None: # Save the best iteration logger.info("""Best iteration: %d""" , __UpperCAmelCase ) logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , __UpperCAmelCase ) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(__UpperCAmelCase , F"""eval_results_iter-{iteration}.json""" ) , os.path.join(__UpperCAmelCase , """eval_results_best-iteration.json""" ) , ) else: # Assume that the last iteration is the best logger.info("""Best iteration: %d""" , args.max_selftrain_iterations - 1 ) logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , __UpperCAmelCase ) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(__UpperCAmelCase , F"""eval_results_iter-{args.max_selftrain_iterations - 
1}.json""" ) , os.path.join(__UpperCAmelCase , """eval_results_best-iteration.json""" ) , )
336
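The tail of each self-training iteration above implements patience-based early stopping: keep the best score, reset patience on improvement beyond a threshold, stop after too many stagnant iterations. A condensed sketch of that bookkeeping (simplified: it omits the equal-score branch the full loop also tracks):

def run(scores, patience=2, threshold=0.0):
    best, counter = None, 0
    for iteration, score in enumerate(scores):
        if best is None or score - best > threshold:
            best, counter = score, 0  # improvement: reset patience
        else:
            counter += 1  # stagnation: burn patience
            if counter >= patience:
                return iteration, best  # early stop
    return len(scores) - 1, best


stop_at, best = run([0.70, 0.75, 0.75, 0.74, 0.90], patience=2)
assert (stop_at, best) == (3, 0.75)  # stops before ever seeing 0.90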
"""simple docstring""" import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCamelCase ( snake_case , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = LEDTokenizer SCREAMING_SNAKE_CASE_ : List[str] = LEDTokenizerFast SCREAMING_SNAKE_CASE_ : List[str] = True def lowerCamelCase__ ( self ): super().setUp() _lowercase : Union[str, Any] = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] _lowercase : List[Any] = dict(zip(UpperCAmelCase_ ,range(len(UpperCAmelCase_ ) ) ) ) _lowercase : Optional[int] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] _lowercase : Dict = {"""unk_token""": """<unk>"""} _lowercase : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] ) _lowercase : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write(json.dumps(UpperCAmelCase_ ) + """\n""" ) with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write("""\n""".join(UpperCAmelCase_ ) ) def lowerCamelCase__ ( self ,**UpperCAmelCase_ ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,**UpperCAmelCase_ ): kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ): return "lower newer", "lower newer" @cached_property def lowerCamelCase__ ( self ): return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" ) @cached_property def lowerCamelCase__ ( self ): return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" ) @require_torch def lowerCamelCase__ ( self ): _lowercase : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] _lowercase : Any = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowercase : Tuple = tokenizer(UpperCAmelCase_ ,max_length=len(UpperCAmelCase_ ) ,padding=UpperCAmelCase_ ,return_tensors="""pt""" ) self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ ) self.assertEqual((2, 9) ,batch.input_ids.shape ) self.assertEqual((2, 9) ,batch.attention_mask.shape ) _lowercase : Optional[Any] = batch.input_ids.tolist()[0] self.assertListEqual(UpperCAmelCase_ ,UpperCAmelCase_ ) @require_torch def lowerCamelCase__ ( self ): _lowercase : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowercase : Dict = tokenizer(UpperCAmelCase_ ,padding=UpperCAmelCase_ ,return_tensors="""pt""" ) self.assertIn("""input_ids""" ,UpperCAmelCase_ ) self.assertIn("""attention_mask""" ,UpperCAmelCase_ ) self.assertNotIn("""labels""" ,UpperCAmelCase_ ) self.assertNotIn("""decoder_attention_mask""" ,UpperCAmelCase_ ) 
@require_torch def lowerCamelCase__ ( self ): _lowercase : Dict = [ """Summary of the text.""", """Another summary.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowercase : Tuple = tokenizer(text_target=UpperCAmelCase_ ,max_length=32 ,padding="""max_length""" ,return_tensors="""pt""" ) self.assertEqual(32 ,targets["""input_ids"""].shape[1] ) @require_torch def lowerCamelCase__ ( self ): for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowercase : List[Any] = tokenizer( ["""I am a small frog""" * 10_24, """I am a small frog"""] ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,return_tensors="""pt""" ) self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ ) self.assertEqual(batch.input_ids.shape ,(2, 51_22) ) @require_torch def lowerCamelCase__ ( self ): _lowercase : List[Any] = ["""A long paragraph for summarization."""] _lowercase : Dict = [ """Summary of the text.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowercase : Dict = tokenizer(UpperCAmelCase_ ,return_tensors="""pt""" ) _lowercase : List[str] = tokenizer(text_target=UpperCAmelCase_ ,return_tensors="""pt""" ) _lowercase : Union[str, Any] = inputs["""input_ids"""] _lowercase : List[str] = targets["""input_ids"""] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def lowerCamelCase__ ( self ): for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowercase : str = ["""Summary of the text.""", """Another summary."""] _lowercase : Optional[int] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] _lowercase : Any = tokenizer(UpperCAmelCase_ ,padding=UpperCAmelCase_ ) _lowercase : str = [[0] * len(UpperCAmelCase_ ) for x in encoded_output["""input_ids"""]] _lowercase : Optional[int] = tokenizer.pad(UpperCAmelCase_ ) self.assertSequenceEqual(outputs["""global_attention_mask"""] ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ): pass def lowerCamelCase__ ( self ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): _lowercase : int = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ ) _lowercase : Optional[int] = self.tokenizer_class.from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ ) _lowercase : Dict = """A, <mask> AllenNLP sentence.""" _lowercase : List[Any] = tokenizer_r.encode_plus(UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ ) _lowercase : Any = tokenizer_p.encode_plus(UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ ) self.assertEqual(sum(tokens_r["""token_type_ids"""] ) ,sum(tokens_p["""token_type_ids"""] ) ) self.assertEqual( sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) ,sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) ,) _lowercase : str = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) _lowercase : str = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) self.assertSequenceEqual(tokens_p["""input_ids"""] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) 
self.assertSequenceEqual( UpperCAmelCase_ ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( UpperCAmelCase_ ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
336
1
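The `global_attention_mask` test above checks that `tokenizer.pad` right-pads each per-sequence mask to the batch maximum with LED's -1 fill value. A plain-Python stand-in for that padding behaviour:

def pad_global_attention(masks, pad_value=-1):
    width = max(len(m) for m in masks)
    return [m + [pad_value] * (width - len(m)) for m in masks]


padded = pad_global_attention([[0] * 7, [0] * 5])
assert padded == [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]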
"""simple docstring""" from __future__ import annotations from math import pow, sqrt def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): if (resistance, reactance, impedance).count(0 ) != 1: raise ValueError("""One and only one argument must be 0""" ) if resistance == 0: return {"resistance": sqrt(pow(__UpperCAmelCase , 2 ) - pow(__UpperCAmelCase , 2 ) )} elif reactance == 0: return {"reactance": sqrt(pow(__UpperCAmelCase , 2 ) - pow(__UpperCAmelCase , 2 ) )} elif impedance == 0: return {"impedance": sqrt(pow(__UpperCAmelCase , 2 ) + pow(__UpperCAmelCase , 2 ) )} else: raise ValueError("""Exactly one argument must be 0""" ) if __name__ == "__main__": import doctest doctest.testmod()
336
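A worked instance of the Pythagorean relation the helper above solves in all three directions: R = 3 ohms and X = 4 ohms give |Z| = 5 ohms, and either leg can be recovered from the other two.

from math import sqrt

assert sqrt(3**2 + 4**2) == 5.0  # impedance from resistance and reactance
assert sqrt(5**2 - 4**2) == 3.0  # resistance back from impedance and reactance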
"""simple docstring""" import argparse from collections import defaultdict def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): _lowercase : str = F"""{file}_{class_name}_{test_name}""" done_test[_id] += 1 with open(__UpperCAmelCase , """r""" ) as f: _lowercase : Any = f.readlines() _lowercase : Optional[int] = F"""class {class_name}(""" _lowercase : List[str] = F"""{4 * " "}def {test_name}(""" _lowercase : List[Any] = F"""{8 * " "}{correct_line.split()[0]}""" _lowercase : int = F"""{16 * " "}{correct_line.split()[0]}""" _lowercase : str = False _lowercase : Optional[Any] = False _lowercase : Union[str, Any] = False _lowercase : Any = False _lowercase : int = 0 _lowercase : Tuple = 0 _lowercase : Union[str, Any] = [] for line in lines: if line.startswith(__UpperCAmelCase ): _lowercase : List[str] = True elif in_class and line.startswith(__UpperCAmelCase ): _lowercase : str = True elif in_class and in_func and (line.startswith(__UpperCAmelCase ) or line.startswith(__UpperCAmelCase )): _lowercase : Union[str, Any] = len(line.split(correct_line.split()[0] )[0] ) count += 1 if count == done_test[_id]: _lowercase : Optional[int] = True if in_class and in_func and in_line: if ")" not in line: continue else: _lowercase : Optional[Any] = True if in_class and in_func and in_line and insert_line: new_lines.append(F"""{spaces * " "}{correct_line}""" ) _lowercase : Union[str, Any] = False else: new_lines.append(__UpperCAmelCase ) with open(__UpperCAmelCase , """w""" ) as f: for line in new_lines: f.write(__UpperCAmelCase ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase=None ): if fail is not None: with open(__UpperCAmelCase , """r""" ) as f: _lowercase : Dict = {l.strip() for l in f.readlines()} else: _lowercase : int = None with open(__UpperCAmelCase , """r""" ) as f: _lowercase : int = f.readlines() _lowercase : int = defaultdict(__UpperCAmelCase ) for line in correct_lines: _lowercase , _lowercase , _lowercase , _lowercase : int = line.split(""";""" ) if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures: overwrite_file(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) if __name__ == "__main__": UpperCAmelCase: List[Any] = argparse.ArgumentParser() parser.add_argument("""--correct_filename""", help="""filename of tests with expected result""") parser.add_argument("""--fail_filename""", help="""filename of test failures""", type=str, default=None) UpperCAmelCase: Any = parser.parse_args() main(args.correct_filename, args.fail_filename)
336
1
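The fixer above expects one semicolon-separated record per line in `--correct_filename`; the four fields are inferred from the 4-way split in `main`. A hypothetical sample record (the file, class, and test names here are made up for illustration):

record = "tests/test_foo.py;FooModelTest;test_forward;self.assertEqual(out.shape, (1, 8))"
file, class_name, test_name, correct_line = record.split(";")
assert test_name == "test_forward"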
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase: List[Any] = logging.get_logger(__name__) UpperCAmelCase: List[Any] = { """naver-clova-ix/donut-base""": """https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json""", # See all Donut models at https://huggingface.co/models?filter=donut-swin } class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = "donut-swin" SCREAMING_SNAKE_CASE_ : Any = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self ,UpperCAmelCase_=2_24 ,UpperCAmelCase_=4 ,UpperCAmelCase_=3 ,UpperCAmelCase_=96 ,UpperCAmelCase_=[2, 2, 6, 2] ,UpperCAmelCase_=[3, 6, 12, 24] ,UpperCAmelCase_=7 ,UpperCAmelCase_=4.0 ,UpperCAmelCase_=True ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=False ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=1E-5 ,**UpperCAmelCase_ ,): super().__init__(**UpperCAmelCase_ ) _lowercase : List[Any] = image_size _lowercase : int = patch_size _lowercase : Optional[int] = num_channels _lowercase : List[Any] = embed_dim _lowercase : Dict = depths _lowercase : Tuple = len(UpperCAmelCase_ ) _lowercase : int = num_heads _lowercase : Optional[int] = window_size _lowercase : Optional[Any] = mlp_ratio _lowercase : str = qkv_bias _lowercase : str = hidden_dropout_prob _lowercase : Optional[int] = attention_probs_dropout_prob _lowercase : Union[str, Any] = drop_path_rate _lowercase : Tuple = hidden_act _lowercase : int = use_absolute_embeddings _lowercase : List[Any] = layer_norm_eps _lowercase : Any = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _lowercase : Optional[Any] = int(embed_dim * 2 ** (len(UpperCAmelCase_ ) - 1) )
336
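Worked arithmetic for the final `hidden_size` line above, using the constructor defaults (embed_dim = 96, four stages): the channel width doubles per stage, so the last stage is 96 * 2^3 = 768 wide.

embed_dim, depths = 96, [2, 2, 6, 2]  # constructor defaults above
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))  # width doubles per stage
assert hidden_size == 768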
"""simple docstring""" UpperCAmelCase: List[str] = """0.21.0""" from .accelerator import Accelerator from .big_modeling import ( cpu_offload, cpu_offload_with_hook, disk_offload, dispatch_model, init_empty_weights, init_on_device, load_checkpoint_and_dispatch, ) from .data_loader import skip_first_batches from .launchers import debug_launcher, notebook_launcher from .state import PartialState from .utils import ( DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, FullyShardedDataParallelPlugin, GradScalerKwargs, InitProcessGroupKwargs, find_executable_batch_size, infer_auto_device_map, is_rich_available, load_checkpoint_in_model, synchronize_rng_states, ) if is_rich_available(): from .utils import rich
336
1
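A typical entry-point usage of the `Accelerator` re-exported above; a hedged sketch of a standard accelerate training setup, assuming torch is installed.

import torch
from accelerate import Accelerator

accelerator = Accelerator()  # picks CPU/GPU/distributed setup from the environment
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
model, optimizer = accelerator.prepare(model, optimizer)

loss = model(torch.randn(8, 4)).sum()
accelerator.backward(loss)  # replaces loss.backward() so mixed precision/DDP work
optimizer.step()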
"""simple docstring""" from __future__ import annotations def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): _lowercase : List[Any] = list(range(len(__UpperCAmelCase ) ) ) _lowercase : Optional[int] = [v / w for v, w in zip(__UpperCAmelCase , __UpperCAmelCase )] index.sort(key=lambda __UpperCAmelCase : ratio[i] , reverse=__UpperCAmelCase ) _lowercase : float = 0 _lowercase : list[float] = [0] * len(__UpperCAmelCase ) for i in index: if weight[i] <= capacity: _lowercase : Optional[Any] = 1 max_value += value[i] capacity -= weight[i] else: _lowercase : Any = capacity / weight[i] max_value += value[i] * capacity / weight[i] break return max_value, fractions if __name__ == "__main__": import doctest doctest.testmod()
336
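A worked run of the fractional greedy above: sort by value/weight ratio, take whole items while they fit, then a fraction of the next one.

value, weight, capacity = [60, 50, 40], [10, 10, 10], 15
order = sorted(range(3), key=lambda i: value[i] / weight[i], reverse=True)
total, fractions = 0.0, [0.0, 0.0, 0.0]
for i in order:
    if weight[i] <= capacity:
        fractions[i] = 1          # item fits whole
        total += value[i]
        capacity -= weight[i]
    else:
        fractions[i] = capacity / weight[i]  # take the remaining fraction
        total += value[i] * capacity / weight[i]
        break
assert (total, fractions) == (85.0, [1, 0.5, 0.0])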
"""simple docstring""" UpperCAmelCase: str = """ # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git """ UpperCAmelCase: Any = [{"""type""": """code""", """content""": INSTALL_CONTENT}] UpperCAmelCase: int = { """{processor_class}""": """FakeProcessorClass""", """{model_class}""": """FakeModelClass""", """{object_class}""": """FakeObjectClass""", }
336
1
"""simple docstring""" import collections import os import re from pathlib import Path UpperCAmelCase: int = """src/transformers""" # Matches is_xxx_available() UpperCAmelCase: Optional[int] = re.compile(r"""is\_([a-z_]*)_available()""") # Catches a one-line _import_struct = {xxx} UpperCAmelCase: List[str] = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] UpperCAmelCase: Optional[Any] = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""") # Catches a line if not is_foo_available UpperCAmelCase: Any = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""") # Catches a line _import_struct["bla"].append("foo") UpperCAmelCase: List[str] = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] UpperCAmelCase: List[str] = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""") # Catches a line with an object between quotes and a comma: "MyModel", UpperCAmelCase: int = re.compile(r"""^\s+\"([^\"]+)\",""") # Catches a line with objects between brackets only: ["foo", "bar"], UpperCAmelCase: Tuple = re.compile(r"""^\s+\[([^\]]+)\]""") # Catches a line with from foo import bar, bla, boo UpperCAmelCase: List[Any] = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""") # Catches a line with try: UpperCAmelCase: Union[str, Any] = re.compile(r"""^\s*try:""") # Catches a line with else: UpperCAmelCase: Union[str, Any] = re.compile(r"""^\s*else:""") def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): if _re_test_backend.search(__UpperCAmelCase ) is None: return None _lowercase : Any = [b[0] for b in _re_backend.findall(__UpperCAmelCase )] backends.sort() return "_and_".join(__UpperCAmelCase ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): with open(__UpperCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: _lowercase : str = f.readlines() _lowercase : Union[str, Any] = 0 while line_index < len(__UpperCAmelCase ) and not lines[line_index].startswith("""_import_structure = {""" ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(__UpperCAmelCase ): return None # First grab the objects without a specific backend in _import_structure _lowercase : Optional[Any] = [] while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None: _lowercase : Tuple = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(__UpperCAmelCase ): _lowercase : Tuple = _re_one_line_import_struct.search(__UpperCAmelCase ).groups()[0] _lowercase : List[Any] = re.findall(R"""\[([^\]]+)\]""" , __UpperCAmelCase ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(""", """ )] ) line_index += 1 continue _lowercase : Any = _re_import_struct_key_value.search(__UpperCAmelCase ) if single_line_import_search is not None: _lowercase : Any = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(__UpperCAmelCase ) > 0] objects.extend(__UpperCAmelCase ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) line_index += 1 _lowercase : List[str] = {"""none""": objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith("""if TYPE_CHECKING""" ): # If the line is an if not is_backend_available, we grab all objects associated. 
_lowercase : List[Any] = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: _lowercase : Any = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 _lowercase : str = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ): _lowercase : Optional[int] = lines[line_index] if _re_import_struct_add_one.search(__UpperCAmelCase ) is not None: objects.append(_re_import_struct_add_one.search(__UpperCAmelCase ).groups()[0] ) elif _re_import_struct_add_many.search(__UpperCAmelCase ) is not None: _lowercase : Optional[Any] = _re_import_struct_add_many.search(__UpperCAmelCase ).groups()[0].split(""", """ ) _lowercase : Tuple = [obj[1:-1] for obj in imports if len(__UpperCAmelCase ) > 0] objects.extend(__UpperCAmelCase ) elif _re_between_brackets.search(__UpperCAmelCase ) is not None: _lowercase : Dict = _re_between_brackets.search(__UpperCAmelCase ).groups()[0].split(""", """ ) _lowercase : int = [obj[1:-1] for obj in imports if len(__UpperCAmelCase ) > 0] objects.extend(__UpperCAmelCase ) elif _re_quote_object.search(__UpperCAmelCase ) is not None: objects.append(_re_quote_object.search(__UpperCAmelCase ).groups()[0] ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) elif line.startswith(""" """ * 12 + """\"""" ): objects.append(line[13:-3] ) line_index += 1 _lowercase : Optional[int] = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend _lowercase : Optional[Any] = [] while ( line_index < len(__UpperCAmelCase ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith("""else""" ) ): _lowercase : Dict = lines[line_index] _lowercase : Tuple = _re_import.search(__UpperCAmelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 _lowercase : Optional[int] = {"""none""": objects} # Let's continue with backend-specific objects while line_index < len(__UpperCAmelCase ): # If the line is an if is_backend_available, we grab all objects associated. 
_lowercase : int = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: _lowercase : Union[str, Any] = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 _lowercase : Union[str, Any] = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ): _lowercase : List[Any] = lines[line_index] _lowercase : Optional[Any] = _re_import.search(__UpperCAmelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 12 ): objects.append(line[12:-2] ) line_index += 1 _lowercase : Optional[Any] = objects else: line_index += 1 return import_dict_objects, type_hint_objects def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ): def find_duplicates(__UpperCAmelCase ): return [k for k, v in collections.Counter(__UpperCAmelCase ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] _lowercase : int = [] for key in import_dict_objects.keys(): _lowercase : Tuple = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" ) _lowercase : Dict = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): _lowercase : Optional[int] = """base imports""" if key == """none""" else F"""{key} backend""" errors.append(F"""Differences for {name}:""" ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" ) return errors def __SCREAMING_SNAKE_CASE ( ): _lowercase : Dict = [] for root, _, files in os.walk(__UpperCAmelCase ): if "__init__.py" in files: _lowercase : List[str] = os.path.join(__UpperCAmelCase , """__init__.py""" ) _lowercase : Union[str, Any] = parse_init(__UpperCAmelCase ) if objects is not None: _lowercase : Any = analyze_results(*__UpperCAmelCase ) if len(__UpperCAmelCase ) > 0: _lowercase : List[str] = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}""" failures.append("""\n""".join(__UpperCAmelCase ) ) if len(__UpperCAmelCase ) > 0: raise ValueError("""\n\n""".join(__UpperCAmelCase ) ) def __SCREAMING_SNAKE_CASE ( ): _lowercase : str = [] for path, directories, files in os.walk(__UpperCAmelCase ): for folder in directories: # Ignore private modules if folder.startswith("""_""" ): directories.remove(__UpperCAmelCase ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(__UpperCAmelCase ) / folder).glob("""*.py""" ) ) ) == 0: continue _lowercase : int = str((Path(__UpperCAmelCase ) / folder).relative_to(__UpperCAmelCase ) ) _lowercase : int = short_path.replace(os.path.sep , """.""" ) submodules.append(__UpperCAmelCase ) for fname in files: if fname == "__init__.py": continue _lowercase : Union[str, Any] = str((Path(__UpperCAmelCase ) / 
fname).relative_to(__UpperCAmelCase ) ) _lowercase : Union[str, Any] = short_path.replace(""".py""" , """""" ).replace(os.path.sep , """.""" ) if len(submodule.split(""".""" ) ) == 1: submodules.append(__UpperCAmelCase ) return submodules UpperCAmelCase: int = [ """convert_pytorch_checkpoint_to_tf2""", """modeling_flax_pytorch_utils""", """models.esm.openfold_utils""", ] def __SCREAMING_SNAKE_CASE ( ): # This is to make sure the transformers module imported is the one in the repo. from transformers.utils import direct_transformers_import _lowercase : Dict = direct_transformers_import(__UpperCAmelCase ) _lowercase : Optional[int] = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and # (potentiall re-) add them. with open(os.path.join(__UpperCAmelCase , """__init__.py""" ) , """r""" ) as f: _lowercase : int = f.read() import_structure_keys.update(set(re.findall(R"""import_structure\[\"([^\"]*)\"\]""" , __UpperCAmelCase ) ) ) _lowercase : Union[str, Any] = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(__UpperCAmelCase ) > 0: _lowercase : List[str] = """\n""".join(F"""- {module}""" for module in module_not_registered ) raise ValueError( """The following submodules are not properly registed in the main init of Transformers:\n""" F"""{list_of_modules}\n""" """Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" ) if __name__ == "__main__": check_all_inits() check_submodules()
336
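A miniature of the consistency rule the script above enforces: per backend, `_import_structure` and the `TYPE_CHECKING` branch must declare the same objects, and any mismatch becomes an error message.

import_dict_objects = {"none": ["EncodecConfig"], "torch": ["EncodecModel"]}
type_hint_objects = {"none": ["EncodecConfig"], "torch": ["EncodecModel", "EncodecPreTrainedModel"]}

errors = []
for key in import_dict_objects:
    missing = sorted(set(type_hint_objects[key]) - set(import_dict_objects[key]))
    errors += [f"{name} in TYPE_HINT but not in _import_structure ({key} backend)." for name in missing]

assert errors == ["EncodecPreTrainedModel in TYPE_HINT but not in _import_structure (torch backend)."]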
"""simple docstring""" import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL UpperCAmelCase: List[Any] = logging.get_logger(__name__) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): def constraint_to_multiple_of(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=0 , __UpperCAmelCase=None ): _lowercase : Union[str, Any] = round(val / multiple ) * multiple if max_val is not None and x > max_val: _lowercase : str = math.floor(val / multiple ) * multiple if x < min_val: _lowercase : Dict = math.ceil(val / multiple ) * multiple return x _lowercase : List[str] = (output_size, output_size) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else output_size _lowercase , _lowercase : List[Any] = get_image_size(__UpperCAmelCase ) _lowercase , _lowercase : Union[str, Any] = output_size # determine new height and width _lowercase : str = output_height / input_height _lowercase : List[Any] = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width _lowercase : str = scale_width else: # fit height _lowercase : int = scale_height _lowercase : List[Any] = constraint_to_multiple_of(scale_height * input_height , multiple=__UpperCAmelCase ) _lowercase : Optional[Any] = constraint_to_multiple_of(scale_width * input_width , multiple=__UpperCAmelCase ) return (new_height, new_width) class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = ["pixel_values"] def __init__( self ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,UpperCAmelCase_ = PILImageResampling.BILINEAR ,UpperCAmelCase_ = False ,UpperCAmelCase_ = 1 ,UpperCAmelCase_ = True ,UpperCAmelCase_ = 1 / 2_55 ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,): super().__init__(**UpperCAmelCase_ ) _lowercase : List[Any] = size if size is not None else {"""height""": 3_84, """width""": 3_84} _lowercase : str = get_size_dict(UpperCAmelCase_ ) _lowercase : Tuple = do_resize _lowercase : Any = size _lowercase : List[Any] = keep_aspect_ratio _lowercase : Any = ensure_multiple_of _lowercase : str = resample _lowercase : Optional[Any] = do_rescale _lowercase : List[Any] = rescale_factor _lowercase : Union[str, Any] = do_normalize _lowercase : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _lowercase : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = False ,UpperCAmelCase_ = 1 ,UpperCAmelCase_ = PILImageResampling.BICUBIC ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,): _lowercase : Optional[Any] = get_size_dict(UpperCAmelCase_ ) if "height" not in size or "width" not in size: raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. 
Got {size.keys()}""" ) _lowercase : Dict = get_resize_output_image_size( UpperCAmelCase_ ,output_size=(size["""height"""], size["""width"""]) ,keep_aspect_ratio=UpperCAmelCase_ ,multiple=UpperCAmelCase_ ,) return resize(UpperCAmelCase_ ,size=UpperCAmelCase_ ,resample=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,): return rescale(UpperCAmelCase_ ,scale=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,): return normalize(UpperCAmelCase_ ,mean=UpperCAmelCase_ ,std=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = ChannelDimension.FIRST ,**UpperCAmelCase_ ,): _lowercase : Any = do_resize if do_resize is not None else self.do_resize _lowercase : List[str] = size if size is not None else self.size _lowercase : int = get_size_dict(UpperCAmelCase_ ) _lowercase : Any = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio _lowercase : List[str] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of _lowercase : List[str] = resample if resample is not None else self.resample _lowercase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale _lowercase : Any = rescale_factor if rescale_factor is not None else self.rescale_factor _lowercase : str = do_normalize if do_normalize is not None else self.do_normalize _lowercase : Optional[int] = image_mean if image_mean is not None else self.image_mean _lowercase : int = image_std if image_std is not None else self.image_std _lowercase : Union[str, Any] = make_list_of_images(UpperCAmelCase_ ) if not valid_images(UpperCAmelCase_ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and (size is None or resample is None): raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. 
_lowercase : int = [to_numpy_array(UpperCAmelCase_ ) for image in images] if do_resize: _lowercase : Union[str, Any] = [self.resize(image=UpperCAmelCase_ ,size=UpperCAmelCase_ ,resample=UpperCAmelCase_ ) for image in images] if do_rescale: _lowercase : int = [self.rescale(image=UpperCAmelCase_ ,scale=UpperCAmelCase_ ) for image in images] if do_normalize: _lowercase : str = [self.normalize(image=UpperCAmelCase_ ,mean=UpperCAmelCase_ ,std=UpperCAmelCase_ ) for image in images] _lowercase : Tuple = [to_channel_dimension_format(UpperCAmelCase_ ,UpperCAmelCase_ ) for image in images] _lowercase : int = {"""pixel_values""": images} return BatchFeature(data=UpperCAmelCase_ ,tensor_type=UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ): _lowercase : Union[str, Any] = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(UpperCAmelCase_ ) != len(UpperCAmelCase_ ): raise ValueError( """Make sure that you pass in as many target sizes as the batch dimension of the logits""" ) if is_torch_tensor(UpperCAmelCase_ ): _lowercase : Tuple = target_sizes.numpy() _lowercase : Optional[Any] = [] for idx in range(len(UpperCAmelCase_ ) ): _lowercase : Dict = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode="""bilinear""" ,align_corners=UpperCAmelCase_ ) _lowercase : Optional[int] = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(UpperCAmelCase_ ) else: _lowercase : Union[str, Any] = logits.argmax(dim=1 ) _lowercase : Optional[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
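# --- Worked example (illustrative, not part of the original file) ---
# With keep_aspect_ratio=True, the resize helper above keeps the scale factor
# closest to 1, then rounds each side to a multiple of `ensure_multiple_of`.
# For a 480x640 input resized toward the 384x384 default with multiple=32
# (values assumed for illustration):
scale_height = 384 / 480  # 0.8
scale_width = 384 / 640   # 0.6
assert abs(1 - scale_width) >= abs(1 - scale_height)  # width deviates more -> fit height
new_height = round(scale_height * 480 / 32) * 32  # 384
new_width = round(scale_height * 640 / 32) * 32   # 512
assert (new_height, new_width) == (384, 512)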
"""simple docstring""" from queue import PriorityQueue from typing import Any import numpy as np def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): for nxt, d in graph[v]: if nxt in visited_forward: continue _lowercase : Any = cst_fwd.get(__UpperCAmelCase , np.inf ) _lowercase : Optional[int] = cst_fwd[v] + d if new_cost_f < old_cost_f: queue.put((new_cost_f, nxt) ) _lowercase : Optional[int] = new_cost_f _lowercase : str = v if nxt in visited_backward: if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance: _lowercase : List[Any] = cst_fwd[v] + d + cst_bwd[nxt] return shortest_distance def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): _lowercase : int = -1 _lowercase : Tuple = set() _lowercase : Dict = set() _lowercase : int = {source: 0} _lowercase : Optional[int] = {destination: 0} _lowercase : List[Any] = {source: None} _lowercase : List[str] = {destination: None} _lowercase : PriorityQueue[Any] = PriorityQueue() _lowercase : PriorityQueue[Any] = PriorityQueue() _lowercase : List[str] = np.inf queue_forward.put((0, source) ) queue_backward.put((0, destination) ) if source == destination: return 0 while not queue_forward.empty() and not queue_backward.empty(): _lowercase , _lowercase : Optional[int] = queue_forward.get() visited_forward.add(__UpperCAmelCase ) _lowercase , _lowercase : Union[str, Any] = queue_backward.get() visited_backward.add(__UpperCAmelCase ) _lowercase : Tuple = pass_and_relaxation( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) _lowercase : Union[str, Any] = pass_and_relaxation( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance: break if shortest_distance != np.inf: _lowercase : Optional[int] = shortest_distance return shortest_path_distance UpperCAmelCase: List[Any] = { """B""": [["""C""", 1]], """C""": [["""D""", 1]], """D""": [["""F""", 1]], """E""": [["""B""", 1], ["""G""", 2]], """F""": [], """G""": [["""F""", 1]], } UpperCAmelCase: Tuple = { """B""": [["""E""", 1]], """C""": [["""B""", 1]], """D""": [["""C""", 1]], """F""": [["""D""", 1], ["""G""", 1]], """E""": [[None, np.inf]], """G""": [["""E""", 2]], } if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import numpy as np import skfuzzy as fuzz if __name__ == "__main__": # Create universe of discourse in Python using linspace () UpperCAmelCase: Optional[Any] = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False) # Create two fuzzy sets by defining any membership function # (trapmf(), gbellmf(), gaussmf(), etc). UpperCAmelCase: Tuple = [0, 25, 50] UpperCAmelCase: List[Any] = [25, 50, 75] UpperCAmelCase: Optional[int] = fuzz.membership.trimf(X, abca) UpperCAmelCase: Any = fuzz.membership.trimf(X, abca) # Compute the different operations using inbuilt functions. UpperCAmelCase: List[Any] = np.ones(75) UpperCAmelCase: Any = np.zeros((75,)) # 1. Union = max(µA(x), µB(x)) UpperCAmelCase: str = fuzz.fuzzy_or(X, young, X, middle_aged)[1] # 2. Intersection = min(µA(x), µB(x)) UpperCAmelCase: Optional[int] = fuzz.fuzzy_and(X, young, X, middle_aged)[1] # 3. Complement (A) = (1- min(µA(x)) UpperCAmelCase: List[Any] = fuzz.fuzzy_not(young) # 4. Difference (A/B) = min(µA(x),(1- µB(x))) UpperCAmelCase: Optional[int] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1] # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))] UpperCAmelCase: int = young + middle_aged - (young * middle_aged) # 6. Algebraic Product = (µA(x) * µB(x)) UpperCAmelCase: int = young * middle_aged # 7. Bounded Sum = min[1,(µA(x), µB(x))] UpperCAmelCase: List[Any] = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1] # 8. Bounded difference = min[0,(µA(x), µB(x))] UpperCAmelCase: int = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1] # max-min composition # max-product composition # Plot each set A, set B and each operation result using plot() and subplot(). from matplotlib import pyplot as plt plt.figure() plt.subplot(4, 3, 1) plt.plot(X, young) plt.title("""Young""") plt.grid(True) plt.subplot(4, 3, 2) plt.plot(X, middle_aged) plt.title("""Middle aged""") plt.grid(True) plt.subplot(4, 3, 3) plt.plot(X, union) plt.title("""union""") plt.grid(True) plt.subplot(4, 3, 4) plt.plot(X, intersection) plt.title("""intersection""") plt.grid(True) plt.subplot(4, 3, 5) plt.plot(X, complement_a) plt.title("""complement_a""") plt.grid(True) plt.subplot(4, 3, 6) plt.plot(X, difference) plt.title("""difference a/b""") plt.grid(True) plt.subplot(4, 3, 7) plt.plot(X, alg_sum) plt.title("""alg_sum""") plt.grid(True) plt.subplot(4, 3, 8) plt.plot(X, alg_product) plt.title("""alg_product""") plt.grid(True) plt.subplot(4, 3, 9) plt.plot(X, bdd_sum) plt.title("""bdd_sum""") plt.grid(True) plt.subplot(4, 3, 10) plt.plot(X, bdd_difference) plt.title("""bdd_difference""") plt.grid(True) plt.subplots_adjust(hspace=0.5) plt.show()
"""simple docstring""" import warnings from ...utils import logging from .image_processing_clip import CLIPImageProcessor UpperCAmelCase: int = logging.get_logger(__name__) class UpperCamelCase ( snake_case ): """simple docstring""" def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): warnings.warn( """The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use CLIPImageProcessor instead.""" ,UpperCAmelCase_ ,) super().__init__(*UpperCAmelCase_ ,**UpperCAmelCase_ )
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class UpperCamelCase ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self ): _lowercase : str = tempfile.mkdtemp() # fmt: off _lowercase : List[Any] = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""] # fmt: on _lowercase : Optional[int] = dict(zip(UpperCAmelCase_ ,range(len(UpperCAmelCase_ ) ) ) ) _lowercase : Optional[int] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""] _lowercase : Optional[int] = {"""unk_token""": """<unk>"""} _lowercase : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] ) _lowercase : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write(json.dumps(UpperCAmelCase_ ) + """\n""" ) with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write("""\n""".join(UpperCAmelCase_ ) ) _lowercase : Dict = { """do_resize""": True, """size""": 20, """do_center_crop""": True, """crop_size""": 18, """do_normalize""": True, """image_mean""": [0.48145466, 0.4578275, 0.40821073], """image_std""": [0.26862954, 0.26130258, 0.27577711], } _lowercase : List[Any] = os.path.join(self.tmpdirname ,UpperCAmelCase_ ) with open(self.image_processor_file ,"""w""" ,encoding="""utf-8""" ) as fp: json.dump(UpperCAmelCase_ ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ,**UpperCAmelCase_ ): return CLIPTokenizer.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,**UpperCAmelCase_ ): return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,**UpperCAmelCase_ ): return CLIPImageProcessor.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ): shutil.rmtree(self.tmpdirname ) def lowerCamelCase__ ( self ): _lowercase : Optional[Any] = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )] _lowercase : Tuple = [Image.fromarray(np.moveaxis(UpperCAmelCase_ ,0 ,-1 ) ) for x in image_inputs] return image_inputs def lowerCamelCase__ ( self ): _lowercase : Union[str, Any] = self.get_tokenizer() _lowercase : List[Any] = self.get_rust_tokenizer() _lowercase : List[Any] = self.get_image_processor() _lowercase : List[str] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) processor_slow.save_pretrained(self.tmpdirname ) _lowercase : Optional[int] = CLIPProcessor.from_pretrained(self.tmpdirname ,use_fast=UpperCAmelCase_ ) _lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) processor_fast.save_pretrained(self.tmpdirname ) _lowercase : List[str] = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() ) 
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer ,UpperCAmelCase_ ) self.assertIsInstance(processor_fast.tokenizer ,UpperCAmelCase_ ) self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor ,UpperCAmelCase_ ) self.assertIsInstance(processor_fast.image_processor ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ): _lowercase : str = CLIPProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _lowercase : Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" ) _lowercase : Optional[int] = self.get_image_processor(do_normalize=UpperCAmelCase_ ,padding_value=1.0 ) _lowercase : int = CLIPProcessor.from_pretrained( self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=UpperCAmelCase_ ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,UpperCAmelCase_ ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ): _lowercase : Optional[Any] = self.get_image_processor() _lowercase : Optional[int] = self.get_tokenizer() _lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) _lowercase : int = self.prepare_image_inputs() _lowercase : str = image_processor(UpperCAmelCase_ ,return_tensors="""np""" ) _lowercase : int = processor(images=UpperCAmelCase_ ,return_tensors="""np""" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() ,input_processor[key].sum() ,delta=1E-2 ) def lowerCamelCase__ ( self ): _lowercase : Optional[Any] = self.get_image_processor() _lowercase : Optional[Any] = self.get_tokenizer() _lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) _lowercase : List[Any] = """lower newer""" _lowercase : Any = processor(text=UpperCAmelCase_ ) _lowercase : Union[str, Any] = tokenizer(UpperCAmelCase_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def lowerCamelCase__ ( self ): _lowercase : Union[str, Any] = self.get_image_processor() _lowercase : List[Any] = self.get_tokenizer() _lowercase : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) _lowercase : str = """lower newer""" _lowercase : List[Any] = self.prepare_image_inputs() _lowercase : Optional[int] = processor(text=UpperCAmelCase_ ,images=UpperCAmelCase_ ) self.assertListEqual(list(inputs.keys() ) ,["""input_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(UpperCAmelCase_ ): processor() def lowerCamelCase__ ( self ): _lowercase : Dict = self.get_image_processor() _lowercase : List[Any] = self.get_tokenizer() _lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) _lowercase : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _lowercase : int = processor.batch_decode(UpperCAmelCase_ ) _lowercase : 
Tuple = tokenizer.batch_decode(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ): _lowercase : Optional[Any] = self.get_image_processor() _lowercase : List[Any] = self.get_tokenizer() _lowercase : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) _lowercase : Optional[Any] = """lower newer""" _lowercase : Any = self.prepare_image_inputs() _lowercase : Optional[int] = processor(text=UpperCAmelCase_ ,images=UpperCAmelCase_ ) self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
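# --- Illustrative usage sketch (not part of the original test file) ---
# What the tests above exercise end to end: a processor that routes `text` to
# the tokenizer and `images` to the image processor, then merges the results.
# Kept commented out because it downloads a checkpoint; the checkpoint name is
# the standard public CLIP model, used here only as an example.
#
# import numpy as np
# from PIL import Image
# from transformers import CLIPProcessor
#
# processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
# image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
# inputs = processor(text="lower newer", images=image, return_tensors="np")
# sorted(inputs.keys())  # ['attention_mask', 'input_ids', 'pixel_values']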
"""simple docstring""" import logging import os import sys import warnings from dataclasses import dataclass, field from random import randint from typing import Optional import datasets import evaluate import numpy as np from datasets import DatasetDict, load_dataset import transformers from transformers import ( AutoConfig, AutoFeatureExtractor, AutoModelForAudioClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version UpperCAmelCase: List[str] = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.31.0""") require_version("""datasets>=1.14.0""", """To fix: pip install -r examples/pytorch/audio-classification/requirements.txt""") def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 16000 ): _lowercase : List[Any] = int(round(sample_rate * max_length ) ) if len(__UpperCAmelCase ) <= sample_length: return wav _lowercase : List[Any] = randint(0 , len(__UpperCAmelCase ) - sample_length - 1 ) return wav[random_offset : random_offset + sample_length] @dataclass class UpperCamelCase : """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[str] = field(default=snake_case , metadata={"help": "Name of a dataset from the datasets package"} ) SCREAMING_SNAKE_CASE_ : Optional[str] = field( default=snake_case , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) SCREAMING_SNAKE_CASE_ : Optional[str] = field( default=snake_case , metadata={"help": "A file containing the training audio paths and labels."} ) SCREAMING_SNAKE_CASE_ : Optional[str] = field( default=snake_case , metadata={"help": "A file containing the validation audio paths and labels."} ) SCREAMING_SNAKE_CASE_ : str = field( default="train" , metadata={ "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'" } , ) SCREAMING_SNAKE_CASE_ : str = field( default="validation" , metadata={ "help": ( "The name of the training data set split to use (via the datasets library). Defaults to 'validation'" ) } , ) SCREAMING_SNAKE_CASE_ : str = field( default="audio" , metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"} , ) SCREAMING_SNAKE_CASE_ : str = field( default="label" , metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"} ) SCREAMING_SNAKE_CASE_ : Optional[int] = field( default=snake_case , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } , ) SCREAMING_SNAKE_CASE_ : Optional[int] = field( default=snake_case , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." 
) } , ) SCREAMING_SNAKE_CASE_ : float = field( default=2_0 , metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."} , ) @dataclass class UpperCamelCase : """simple docstring""" SCREAMING_SNAKE_CASE_ : str = field( default="facebook/wav2vec2-base" , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} , ) SCREAMING_SNAKE_CASE_ : Optional[str] = field( default=snake_case , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) SCREAMING_SNAKE_CASE_ : Optional[str] = field( default=snake_case , metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"} ) SCREAMING_SNAKE_CASE_ : str = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) SCREAMING_SNAKE_CASE_ : Optional[str] = field( default=snake_case , metadata={"help": "Name or path of preprocessor config."} ) SCREAMING_SNAKE_CASE_ : bool = field( default=snake_case , metadata={"help": "Whether to freeze the feature encoder layers of the model."} ) SCREAMING_SNAKE_CASE_ : bool = field( default=snake_case , metadata={"help": "Whether to generate an attention mask in the feature extractor."} ) SCREAMING_SNAKE_CASE_ : bool = field( default=snake_case , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) } , ) SCREAMING_SNAKE_CASE_ : Optional[bool] = field( default=snake_case , metadata={"help": "Whether to freeze the feature extractor layers of the model."} ) SCREAMING_SNAKE_CASE_ : bool = field( default=snake_case , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , ) def lowerCamelCase__ ( self ): if not self.freeze_feature_extractor and self.freeze_feature_encoder: warnings.warn( """The argument `--freeze_feature_extractor` is deprecated and """ """will be removed in a future version. Use `--freeze_feature_encoder`""" """instead. Setting `freeze_feature_encoder==True`.""" ,UpperCAmelCase_ ,) if self.freeze_feature_extractor and not self.freeze_feature_encoder: raise ValueError( """The argument `--freeze_feature_extractor` is deprecated and """ """should not be used in combination with `--freeze_feature_encoder`.""" """Only make use of `--freeze_feature_encoder`.""" ) def __SCREAMING_SNAKE_CASE ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _lowercase : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _lowercase , _lowercase , _lowercase : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _lowercase , _lowercase , _lowercase : int = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. 
send_example_telemetry("""run_audio_classification""" , __UpperCAmelCase , __UpperCAmelCase ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _lowercase : Any = training_args.get_process_log_level() logger.setLevel(__UpperCAmelCase ) transformers.utils.logging.set_verbosity(__UpperCAmelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """ + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(F"""Training/evaluation parameters {training_args}""" ) # Set seed before initializing model. set_seed(training_args.seed ) # Detecting last checkpoint. _lowercase : Optional[Any] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _lowercase : List[Any] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. """ """Use --overwrite_output_dir to train from scratch.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Initialize our dataset and prepare it for the audio classification task. _lowercase : int = DatasetDict() _lowercase : Any = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , ) _lowercase : Dict = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , ) if data_args.audio_column_name not in raw_datasets["train"].column_names: raise ValueError( F"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """ """Make sure to set `--audio_column_name` to the correct audio column - one of """ F"""{", ".join(raw_datasets["train"].column_names )}.""" ) if data_args.label_column_name not in raw_datasets["train"].column_names: raise ValueError( F"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. 
""" """Make sure to set `--label_column_name` to the correct text column - one of """ F"""{", ".join(raw_datasets["train"].column_names )}.""" ) # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over # transformer outputs in the classifier, but it doesn't always lead to better accuracy _lowercase : List[str] = AutoFeatureExtractor.from_pretrained( model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # `datasets` takes care of automatically loading and resampling the audio, # so we just need to set the correct target sampling rate. _lowercase : int = raw_datasets.cast_column( data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) ) _lowercase : int = feature_extractor.model_input_names[0] def train_transforms(__UpperCAmelCase ): _lowercase : Optional[Any] = [] for audio in batch[data_args.audio_column_name]: _lowercase : Dict = random_subsample( audio["""array"""] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate ) subsampled_wavs.append(__UpperCAmelCase ) _lowercase : Any = feature_extractor(__UpperCAmelCase , sampling_rate=feature_extractor.sampling_rate ) _lowercase : Optional[int] = {model_input_name: inputs.get(__UpperCAmelCase )} _lowercase : List[str] = list(batch[data_args.label_column_name] ) return output_batch def val_transforms(__UpperCAmelCase ): _lowercase : List[str] = [audio["""array"""] for audio in batch[data_args.audio_column_name]] _lowercase : Dict = feature_extractor(__UpperCAmelCase , sampling_rate=feature_extractor.sampling_rate ) _lowercase : int = {model_input_name: inputs.get(__UpperCAmelCase )} _lowercase : Optional[int] = list(batch[data_args.label_column_name] ) return output_batch # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. _lowercase : Dict = raw_datasets["""train"""].features[data_args.label_column_name].names _lowercase , _lowercase : List[Any] = {}, {} for i, label in enumerate(__UpperCAmelCase ): _lowercase : Tuple = str(__UpperCAmelCase ) _lowercase : Any = label # Load the accuracy metric from the datasets package _lowercase : str = evaluate.load("""accuracy""" ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with # `predictions` and `label_ids` fields) and has to return a dictionary string to float. 
def compute_metrics(__UpperCAmelCase ): _lowercase : int = np.argmax(eval_pred.predictions , axis=1 ) return metric.compute(predictions=__UpperCAmelCase , references=eval_pred.label_ids ) _lowercase : Union[str, Any] = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(__UpperCAmelCase ) , labelaid=__UpperCAmelCase , idalabel=__UpperCAmelCase , finetuning_task="""audio-classification""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) _lowercase : Optional[int] = AutoModelForAudioClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) # freeze the convolutional waveform encoder if model_args.freeze_feature_encoder: model.freeze_feature_encoder() if training_args.do_train: if data_args.max_train_samples is not None: _lowercase : Tuple = ( raw_datasets["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms raw_datasets["train"].set_transform(__UpperCAmelCase , output_all_columns=__UpperCAmelCase ) if training_args.do_eval: if data_args.max_eval_samples is not None: _lowercase : str = ( raw_datasets["""eval"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms raw_datasets["eval"].set_transform(__UpperCAmelCase , output_all_columns=__UpperCAmelCase ) # Initialize our trainer _lowercase : Union[str, Any] = Trainer( model=__UpperCAmelCase , args=__UpperCAmelCase , train_dataset=raw_datasets["""train"""] if training_args.do_train else None , eval_dataset=raw_datasets["""eval"""] if training_args.do_eval else None , compute_metrics=__UpperCAmelCase , tokenizer=__UpperCAmelCase , ) # Training if training_args.do_train: _lowercase : Tuple = None if training_args.resume_from_checkpoint is not None: _lowercase : List[Any] = training_args.resume_from_checkpoint elif last_checkpoint is not None: _lowercase : List[Any] = last_checkpoint _lowercase : Tuple = trainer.train(resume_from_checkpoint=__UpperCAmelCase ) trainer.save_model() trainer.log_metrics("""train""" , train_result.metrics ) trainer.save_metrics("""train""" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _lowercase : Any = trainer.evaluate() trainer.log_metrics("""eval""" , __UpperCAmelCase ) trainer.save_metrics("""eval""" , __UpperCAmelCase ) # Write model card and (optionally) push to hub _lowercase : Any = { """finetuned_from""": model_args.model_name_or_path, """tasks""": """audio-classification""", """dataset""": data_args.dataset_name, """tags""": ["""audio-classification"""], } if training_args.push_to_hub: trainer.push_to_hub(**__UpperCAmelCase ) else: trainer.create_model_card(**__UpperCAmelCase ) if __name__ == "__main__": main()
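# --- Illustrative sketch (not part of the original script) ---
# The random-crop helper at the top of this script, restated with descriptive
# names: training clips longer than `max_length` seconds are cut at a random
# offset, so each epoch sees a different window of the waveform.
from random import randint

import numpy as np


def random_subsample_sketch(wav: np.ndarray, max_length: float, sample_rate: int = 16_000) -> np.ndarray:
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    offset = randint(0, len(wav) - sample_length - 1)
    return wav[offset : offset + sample_length]


# Hypothetical invocation of the script (dataset and flag values are examples;
# all flags appear in the dataclasses above):
# python run_audio_classification.py \
#     --model_name_or_path facebook/wav2vec2-base \
#     --dataset_name superb --dataset_config_name ks \
#     --output_dir wav2vec2-base-ft-keyword-spotting \
#     --do_train --do_eval --max_length_seconds 1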
"""simple docstring""" import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int UpperCAmelCase: str = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class UpperCamelCase ( datasets.BuilderConfig ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[datasets.Features] = None def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , ): import pyspark def generate_fn(): _lowercase : List[Any] = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) ) for partition_id in partition_order: _lowercase : Optional[int] = df_with_partition_id.select("""*""" ).where(F"""part_id = {partition_id}""" ).drop("""part_id""" ) _lowercase : int = partition_df.collect() _lowercase : Dict = 0 for row in rows: yield F"""{partition_id}_{row_id}""", row.asDict() row_id += 1 return generate_fn class UpperCamelCase ( _BaseExamplesIterable ): """simple docstring""" def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_=None ,): _lowercase : Union[str, Any] = df _lowercase : List[str] = partition_order or range(self.df.rdd.getNumPartitions() ) _lowercase : Tuple = _generate_iterable_examples(self.df ,self.partition_order ) def __iter__( self ): yield from self.generate_examples_fn() def lowerCamelCase__ ( self ,UpperCAmelCase_ ): _lowercase : List[str] = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(UpperCAmelCase_ ) return SparkExamplesIterable(self.df ,partition_order=UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ): _lowercase : Union[str, Any] = self.split_shard_indices_by_worker(UpperCAmelCase_ ,UpperCAmelCase_ ) return SparkExamplesIterable(self.df ,partition_order=UpperCAmelCase_ ) @property def lowerCamelCase__ ( self ): return len(self.partition_order ) class UpperCamelCase ( datasets.DatasetBuilder ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = SparkConfig def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,): import pyspark _lowercase : List[Any] = pyspark.sql.SparkSession.builder.getOrCreate() _lowercase : List[Any] = df _lowercase : int = working_dir super().__init__( cache_dir=UpperCAmelCase_ ,config_name=str(self.df.semanticHash() ) ,**UpperCAmelCase_ ,) def lowerCamelCase__ ( self ): # Returns the path of the created file. def create_cache_and_write_probe(UpperCAmelCase_ ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(self._cache_dir ,exist_ok=UpperCAmelCase_ ) _lowercase : Union[str, Any] = os.path.join(self._cache_dir ,"""fs_test""" + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(UpperCAmelCase_ ,"""a""" ) return [probe_file] if self._spark.conf.get("""spark.master""" ,"""""" ).startswith("""local""" ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. 
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. if self._cache_dir: _lowercase : List[str] = ( self._spark.sparkContext.parallelize(range(1 ) ,1 ).mapPartitions(UpperCAmelCase_ ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( """When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" ) def lowerCamelCase__ ( self ): return datasets.DatasetInfo(features=self.config.features ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ): return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def lowerCamelCase__ ( self ,UpperCAmelCase_ ): import pyspark def get_arrow_batch_size(UpperCAmelCase_ ): for batch in it: yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} ) _lowercase : List[str] = self.df.count() _lowercase : List[str] = df_num_rows if df_num_rows <= 1_00 else 1_00 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. _lowercase : Union[str, Any] = ( self.df.limit(UpperCAmelCase_ ) .repartition(1 ) .mapInArrow(UpperCAmelCase_ ,"""batch_bytes: long""" ) .agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) ) .collect()[0] .sample_bytes / sample_num_rows ) _lowercase : List[Any] = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. _lowercase : int = min(UpperCAmelCase_ ,int(approx_total_size / max_shard_size ) ) _lowercase : List[Any] = self.df.repartition(UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,): import pyspark _lowercase : Union[str, Any] = ParquetWriter if file_format == """parquet""" else ArrowWriter _lowercase : List[Any] = os.path.join(self._working_dir ,os.path.basename(UpperCAmelCase_ ) ) if self._working_dir else fpath _lowercase : Any = file_format == """parquet""" # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. _lowercase : Union[str, Any] = self.config.features _lowercase : Optional[int] = self._writer_batch_size _lowercase : Optional[Any] = self._fs.storage_options def write_arrow(UpperCAmelCase_ ): # Within the same SparkContext, no two task attempts will share the same attempt ID. _lowercase : Any = pyspark.TaskContext().taskAttemptId() _lowercase : List[str] = next(UpperCAmelCase_ ,UpperCAmelCase_ ) if first_batch is None: # Some partitions might not receive any data. 
return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,) _lowercase : List[Any] = 0 _lowercase : int = writer_class( features=UpperCAmelCase_ ,path=working_fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,writer_batch_size=UpperCAmelCase_ ,storage_options=UpperCAmelCase_ ,embed_local_files=UpperCAmelCase_ ,) _lowercase : Optional[int] = pa.Table.from_batches([first_batch] ) writer.write_table(UpperCAmelCase_ ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: _lowercase , _lowercase : Optional[Any] = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,) shard_id += 1 _lowercase : Union[str, Any] = writer_class( features=writer._features ,path=working_fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,writer_batch_size=UpperCAmelCase_ ,storage_options=UpperCAmelCase_ ,embed_local_files=UpperCAmelCase_ ,) _lowercase : Dict = pa.Table.from_batches([batch] ) writer.write_table(UpperCAmelCase_ ) if writer._num_bytes > 0: _lowercase , _lowercase : Dict = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,) if working_fpath != fpath: for file in os.listdir(os.path.dirname(UpperCAmelCase_ ) ): _lowercase : Dict = os.path.join(os.path.dirname(UpperCAmelCase_ ) ,os.path.basename(UpperCAmelCase_ ) ) shutil.move(UpperCAmelCase_ ,UpperCAmelCase_ ) _lowercase : List[str] = ( self.df.mapInArrow(UpperCAmelCase_ ,"""task_id: long, num_examples: long, num_bytes: long""" ) .groupBy("""task_id""" ) .agg( pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) ,pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) ,pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) ,pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) ,) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = "arrow" ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,): self._validate_cache_dir() _lowercase : Tuple = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(UpperCAmelCase_ ) _lowercase : Optional[int] = not is_remote_filesystem(self._fs ) _lowercase : Dict = os.path.join if is_local else posixpath.join _lowercase : int = """-TTTTT-SSSSS-of-NNNNN""" _lowercase : Optional[Any] = f"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}""" _lowercase : Dict = path_join(self._output_dir ,UpperCAmelCase_ ) _lowercase : List[Any] = 0 _lowercase : Optional[Any] = 0 _lowercase : int = 0 _lowercase : Any = [] _lowercase : Any = [] for task_id, content in self._prepare_split_single(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ): ( ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ) : Tuple = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(UpperCAmelCase_ ) _lowercase : Optional[int] = total_num_examples _lowercase : List[Any] = total_num_bytes # should rename 
everything at the end logger.debug(f"""Renaming {total_shards} shards.""" ) if total_shards > 1: _lowercase : List[Any] = all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the SparkContext. _lowercase : Union[str, Any] = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,): rename( UpperCAmelCase_ ,fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,fpath.replace("""TTTTT-SSSSS""" ,f"""{global_shard_id:05d}""" ).replace("""NNNNN""" ,f"""{total_shards:05d}""" ) ,) _lowercase : Optional[Any] = [] _lowercase : List[str] = 0 for i in range(len(UpperCAmelCase_ ) ): _lowercase , _lowercase : List[str] = task_id_and_num_shards[i] for shard_id in range(UpperCAmelCase_ ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(UpperCAmelCase_ ,len(UpperCAmelCase_ ) ).map(lambda UpperCAmelCase_ : _rename_shard(*UpperCAmelCase_ ) ).collect() else: # don't use any pattern _lowercase : Tuple = 0 _lowercase : Optional[Any] = task_id_and_num_shards[0][0] self._rename( fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,fpath.replace(UpperCAmelCase_ ,"""""" ) ,) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,): return SparkExamplesIterable(self.df )
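# --- Illustrative usage sketch (not part of the original file) ---
# The builder above backs `Dataset.from_spark`; a hedged example of the
# intended entry point. It requires a running Spark session, and on a
# multi-node cluster a `cache_dir` visible to both driver and workers, as
# enforced by `_validate_cache_dir` above. Kept commented out since it needs
# a Spark runtime; the NFS path is an example.
#
# from pyspark.sql import SparkSession
# from datasets import Dataset
#
# spark = SparkSession.builder.getOrCreate()
# df = spark.createDataFrame([("hello", 0), ("world", 1)], ["text", "label"])
# ds = Dataset.from_spark(df, cache_dir="/nfs/datasets_cache")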
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional from packaging import version if TYPE_CHECKING: from ... import PreTrainedTokenizer, TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import is_torch_available, logging UpperCAmelCase: str = logging.get_logger(__name__) UpperCAmelCase: Dict = { """bigscience/bloom""": """https://huggingface.co/bigscience/bloom/resolve/main/config.json""", """bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/config.json""", """bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json""", """bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json""", """bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/config.json""", """bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json""", } class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = "bloom" SCREAMING_SNAKE_CASE_ : Union[str, Any] = ["past_key_values"] SCREAMING_SNAKE_CASE_ : int = { "num_hidden_layers": "n_layer", "num_attention_heads": "n_head", } def __init__( self ,UpperCAmelCase_=25_08_80 ,UpperCAmelCase_=64 ,UpperCAmelCase_=2 ,UpperCAmelCase_=8 ,UpperCAmelCase_=1E-5 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=True ,UpperCAmelCase_=1 ,UpperCAmelCase_=2 ,UpperCAmelCase_=False ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=1 ,UpperCAmelCase_=False ,**UpperCAmelCase_ ,): _lowercase : List[str] = vocab_size # Backward compatibility with n_embed kwarg _lowercase : Tuple = kwargs.pop("""n_embed""" ,UpperCAmelCase_ ) _lowercase : str = hidden_size if n_embed is None else n_embed _lowercase : List[Any] = n_layer _lowercase : str = n_head _lowercase : List[Any] = layer_norm_epsilon _lowercase : Tuple = initializer_range _lowercase : Tuple = use_cache _lowercase : List[str] = pretraining_tp _lowercase : Union[str, Any] = apply_residual_connection_post_layernorm _lowercase : int = hidden_dropout _lowercase : List[str] = attention_dropout _lowercase : Optional[int] = bos_token_id _lowercase : List[Any] = eos_token_id _lowercase : Union[str, Any] = slow_but_exact super().__init__(bos_token_id=UpperCAmelCase_ ,eos_token_id=UpperCAmelCase_ ,**UpperCAmelCase_ ) class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : str = version.parse("1.12" ) def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ = "default" ,UpperCAmelCase_ = None ,UpperCAmelCase_ = False ,): super().__init__(UpperCAmelCase_ ,task=UpperCAmelCase_ ,patching_specs=UpperCAmelCase_ ,use_past=UpperCAmelCase_ ) if not getattr(self._config ,"""pad_token_id""" ,UpperCAmelCase_ ): # TODO: how to do that better? _lowercase : Tuple = 0 @property def lowerCamelCase__ ( self ): _lowercase : str = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: # BLOOM stores values on dynamic axis 2. 
For more details see: https://github.com/huggingface/transformers/pull/18344 self.fill_with_past_key_values_(UpperCAmelCase_ ,direction="""inputs""" ,inverted_values_shape=UpperCAmelCase_ ) _lowercase : Any = {0: """batch""", 1: """past_sequence + sequence"""} else: _lowercase : Dict = {0: """batch""", 1: """sequence"""} return common_inputs @property def lowerCamelCase__ ( self ): return self._config.n_layer @property def lowerCamelCase__ ( self ): return self._config.n_head @property def lowerCamelCase__ ( self ): return 1E-3 def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = -1 ,UpperCAmelCase_ = -1 ,UpperCAmelCase_ = False ,UpperCAmelCase_ = None ,): _lowercase : Union[str, Any] = super(UpperCAmelCase_ ,self ).generate_dummy_inputs( UpperCAmelCase_ ,batch_size=UpperCAmelCase_ ,seq_length=UpperCAmelCase_ ,is_pair=UpperCAmelCase_ ,framework=UpperCAmelCase_ ) # We need to order the input in the way they appears in the forward() _lowercase : int = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch _lowercase , _lowercase : Any = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values _lowercase : Optional[Any] = seqlen + 2 _lowercase : Optional[int] = self._config.hidden_size // self.num_attention_heads _lowercase : Tuple = ( batch * self.num_attention_heads, head_dim, past_key_values_length, ) _lowercase : str = ( batch * self.num_attention_heads, past_key_values_length, head_dim, ) _lowercase : List[Any] = [ (torch.zeros(UpperCAmelCase_ ), torch.zeros(UpperCAmelCase_ )) for _ in range(self.num_layers ) ] _lowercase : Optional[Any] = common_inputs["""attention_mask"""] if self.use_past: _lowercase : Optional[Any] = ordered_inputs["""attention_mask"""].dtype _lowercase : Any = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(UpperCAmelCase_ ,UpperCAmelCase_ ,dtype=UpperCAmelCase_ )] ,dim=1 ) return ordered_inputs @property def lowerCamelCase__ ( self ): return 13
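# --- Illustrative sketch (not part of the original file) ---
# The `n_embed` backward-compatibility branch above in action, shown with the
# public class this config corresponds to (BloomConfig in transformers); only
# kwargs visible in the __init__ above are used. Kept commented out to avoid
# the import at module load.
#
# from transformers import BloomConfig
#
# config = BloomConfig(n_embed=64, n_layer=2, n_head=8)
# config.hidden_size  # 64 -- the legacy `n_embed` kwarg overrides hidden_size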
"""simple docstring""" import unittest from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin UpperCAmelCase: Optional[int] = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class UpperCamelCase ( snake_case , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = XLNetTokenizer SCREAMING_SNAKE_CASE_ : Dict = XLNetTokenizerFast SCREAMING_SNAKE_CASE_ : int = True SCREAMING_SNAKE_CASE_ : Union[str, Any] = True def lowerCamelCase__ ( self ): super().setUp() # We have a SentencePiece fixture for testing _lowercase : Dict = XLNetTokenizer(UpperCAmelCase_ ,keep_accents=UpperCAmelCase_ ) tokenizer.sanitize_special_tokens() tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase__ ( self ): _lowercase : Union[str, Any] = """<s>""" _lowercase : List[Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) ,UpperCAmelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ): _lowercase : str = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,"""<unk>""" ) self.assertEqual(vocab_keys[1] ,"""<s>""" ) self.assertEqual(vocab_keys[-1] ,"""<eod>""" ) self.assertEqual(len(UpperCAmelCase_ ) ,10_06 ) def lowerCamelCase__ ( self ): self.assertEqual(self.get_tokenizer().vocab_size ,10_00 ) def lowerCamelCase__ ( self ): _lowercase : int = XLNetTokenizer(UpperCAmelCase_ ,keep_accents=UpperCAmelCase_ ) _lowercase : Union[str, Any] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(UpperCAmelCase_ ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) ,[2_85, 46, 10, 1_70, 3_82] ) _lowercase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( UpperCAmelCase_ ,[ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] ,) _lowercase : List[Any] = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ ,[8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] ) _lowercase : Optional[Any] = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ ) self.assertListEqual( UpperCAmelCase_ ,[ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] ,) def lowerCamelCase__ ( self ): _lowercase : Dict = XLNetTokenizer(UpperCAmelCase_ ,do_lower_case=UpperCAmelCase_ ) _lowercase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( UpperCAmelCase_ ,[ SPIECE_UNDERLINE + """""", """i""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + 
"""in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """se""", """.""", ] ,) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""▁he""", """ll""", """o"""] ) def lowerCamelCase__ ( self ): _lowercase : int = XLNetTokenizer(UpperCAmelCase_ ,do_lower_case=UpperCAmelCase_ ) _lowercase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( UpperCAmelCase_ ,[ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """se""", """.""", ] ,) @slow def lowerCamelCase__ ( self ): _lowercase : Dict = XLNetTokenizer.from_pretrained("""xlnet-base-cased""" ) _lowercase : int = tokenizer.encode("""sequence builders""" ,add_special_tokens=UpperCAmelCase_ ) _lowercase : List[str] = tokenizer.encode("""multi-sequence build""" ,add_special_tokens=UpperCAmelCase_ ) _lowercase : Optional[int] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ ) _lowercase : Dict = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ ,UpperCAmelCase_ ) assert encoded_sentence == text + [4, 3] assert encoded_pair == text + [4] + text_a + [4, 3] @slow def lowerCamelCase__ ( self ): # fmt: off _lowercase : Union[str, Any] = {"""input_ids""": [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase_ ,model_name="""xlnet-base-cased""" ,revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" ,)
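# --- Illustrative sketch (not part of the original test file) ---
# What the slow sequence-builder test above asserts: XLNet appends its special
# tokens *after* the text -- per the assertions above, ids 4 (sep) and 3 (cls)
# -- for single sequences and pairs alike. Kept commented out because it
# downloads the checkpoint.
#
# from transformers import XLNetTokenizer
#
# tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
# text = tokenizer.encode("sequence builders", add_special_tokens=False)
# tokenizer.build_inputs_with_special_tokens(text) == text + [4, 3]  # True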
336
1
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class UpperCamelCase ( snake_case , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = DiTPipeline SCREAMING_SNAKE_CASE_ : Any = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS SCREAMING_SNAKE_CASE_ : List[str] = PipelineTesterMixin.required_optional_params - { "latents", "num_images_per_prompt", "callback", "callback_steps", } SCREAMING_SNAKE_CASE_ : Dict = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS SCREAMING_SNAKE_CASE_ : List[str] = False def lowerCamelCase__ ( self ): torch.manual_seed(0 ) _lowercase : Union[str, Any] = TransformeraDModel( sample_size=16 ,num_layers=2 ,patch_size=4 ,attention_head_dim=8 ,num_attention_heads=2 ,in_channels=4 ,out_channels=8 ,attention_bias=UpperCAmelCase_ ,activation_fn="""gelu-approximate""" ,num_embeds_ada_norm=10_00 ,norm_type="""ada_norm_zero""" ,norm_elementwise_affine=UpperCAmelCase_ ,) _lowercase : Optional[int] = AutoencoderKL() _lowercase : List[Any] = DDIMScheduler() _lowercase : Dict = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler} return components def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_=0 ): if str(UpperCAmelCase_ ).startswith("""mps""" ): _lowercase : Tuple = torch.manual_seed(UpperCAmelCase_ ) else: _lowercase : List[Any] = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ ) _lowercase : Any = { """class_labels""": [1], """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def lowerCamelCase__ ( self ): _lowercase : List[str] = """cpu""" _lowercase : Union[str, Any] = self.get_dummy_components() _lowercase : List[Any] = self.pipeline_class(**UpperCAmelCase_ ) pipe.to(UpperCAmelCase_ ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) _lowercase : Any = self.get_dummy_inputs(UpperCAmelCase_ ) _lowercase : Union[str, Any] = pipe(**UpperCAmelCase_ ).images _lowercase : int = image[0, -3:, -3:, -1] self.assertEqual(image.shape ,(1, 16, 16, 3) ) _lowercase : Tuple = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] ) _lowercase : Any = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(UpperCAmelCase_ ,1E-3 ) def lowerCamelCase__ ( self ): self._test_inference_batch_single_identical(relax_max_difference=UpperCAmelCase_ ,expected_max_diff=1E-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() ,reason="""XFormers attention is only available with CUDA and `xformers` installed""" ,) def lowerCamelCase__ ( self ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @require_torch_gpu @slow class UpperCamelCase ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase__ ( self ): _lowercase : Optional[int] = torch.manual_seed(0 ) _lowercase : str = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" ) pipe.to("""cuda""" ) 
_lowercase : Optional[Any] = ["""vase""", """umbrella""", """white shark""", """white wolf"""] _lowercase : Tuple = pipe.get_label_ids(UpperCAmelCase_ ) _lowercase : Optional[Any] = pipe(UpperCAmelCase_ ,generator=UpperCAmelCase_ ,num_inference_steps=40 ,output_type="""np""" ).images for word, image in zip(UpperCAmelCase_ ,UpperCAmelCase_ ): _lowercase : str = load_numpy( f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" ) assert np.abs((expected_image - image).max() ) < 1E-2 def lowerCamelCase__ ( self ): _lowercase : List[str] = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" ) _lowercase : Tuple = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to("""cuda""" ) _lowercase : str = ["""vase""", """umbrella"""] _lowercase : Optional[int] = pipe.get_label_ids(UpperCAmelCase_ ) _lowercase : str = torch.manual_seed(0 ) _lowercase : int = pipe(UpperCAmelCase_ ,generator=UpperCAmelCase_ ,num_inference_steps=25 ,output_type="""np""" ).images for word, image in zip(UpperCAmelCase_ ,UpperCAmelCase_ ): _lowercase : Optional[Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" f"""/dit/{word}_512.npy""" ) assert np.abs((expected_image - image).max() ) < 1E-1
336
"""simple docstring""" def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): return " ".join(input_str.split()[::-1] ) if __name__ == "__main__": import doctest doctest.testmod()
336
1
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , ) @pytest.mark.usefixtures("sm_env" ) @parameterized_class( [ { "framework": "pytorch", "script": "run_glue_model_parallelism.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, ] ) class UpperCamelCase ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self ): if self.framework == "pytorch": subprocess.run( f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() ,encoding="""utf-8""" ,check=UpperCAmelCase_ ,) assert hasattr(self ,"""env""" ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ): # configuration for running training on smdistributed Model Parallel _lowercase : Union[str, Any] = { """enabled""": True, """processes_per_host""": 8, } _lowercase : Optional[Any] = { """enabled""": True, """parameters""": { """microbatches""": 4, """placement_strategy""": """spread""", """pipeline""": """interleaved""", """optimize""": """speed""", """partitions""": 4, """ddp""": True, }, } _lowercase : List[str] = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options} _lowercase : str = """trainer""" if self.script == """run_glue.py""" else """smtrainer""" # creates estimator return HuggingFace( entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" ,instance_count=UpperCAmelCase_ ,instance_type=self.instance_type ,debugger_hook_config=UpperCAmelCase_ ,hyperparameters={ **self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path, """max_steps""": 5_00, } ,metric_definitions=self.env.metric_definitions ,distribution=UpperCAmelCase_ ,py_version="""py36""" ,) def lowerCamelCase__ ( self ,UpperCAmelCase_ ): TrainingJobAnalytics(UpperCAmelCase_ ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" ) @parameterized.expand([(1,)] ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ): # create estimator _lowercase : List[Any] = self.create_estimator(UpperCAmelCase_ ) # run training estimator.fit() # result dataframe _lowercase : List[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis _lowercase : List[str] = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] ) _lowercase : Any = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping _lowercase : Union[str, Any] = ( Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" ,99_99_99 ) ) # assert kpis assert train_runtime <= 
self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy ) assert all(t <= self.results["""eval_loss"""] for t in eval_loss ) # dump tests result into json file to share in PR with open(f"""{estimator.latest_training_job.name}.json""" ,"""w""" ) as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} ,UpperCAmelCase_ )
336
"""simple docstring""" import inspect import re from hashlib import shaaaa from typing import Dict, List from .arrow import arrow from .audiofolder import audiofolder from .csv import csv from .imagefolder import imagefolder from .json import json from .pandas import pandas from .parquet import parquet from .sql import sql # noqa F401 from .text import text def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase : int = [] for line in lines: _lowercase : Dict = re.sub(R"""#.*""" , """""" , __UpperCAmelCase ) # remove comments if line: filtered_lines.append(__UpperCAmelCase ) _lowercase : Tuple = """\n""".join(__UpperCAmelCase ) # Make a hash from all this code _lowercase : Tuple = full_str.encode("""utf-8""" ) return shaaaa(__UpperCAmelCase ).hexdigest() # get importable module names and hash for caching UpperCAmelCase: Tuple = { """csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())), """json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())), """pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())), """parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())), """arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())), """text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())), """imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())), """audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())), } # Used to infer the module to use based on the data files extensions UpperCAmelCase: List[str] = { """.csv""": ("""csv""", {}), """.tsv""": ("""csv""", {"""sep""": """\t"""}), """.json""": ("""json""", {}), """.jsonl""": ("""json""", {}), """.parquet""": ("""parquet""", {}), """.arrow""": ("""arrow""", {}), """.txt""": ("""text""", {}), } _EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) UpperCAmelCase: Any = {"""imagefolder""", """audiofolder"""} # Used to filter data files based on extensions given a module name UpperCAmelCase: Dict[str, List[str]] = {} for _ext, (_module, _) in _EXTENSION_TO_MODULE.items(): _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext) _MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""") _MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
336
1
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class UpperCamelCase ( unittest.TestCase ): """simple docstring""" def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_=7 ,UpperCAmelCase_=3 ,UpperCAmelCase_=18 ,UpperCAmelCase_=30 ,UpperCAmelCase_=4_00 ,UpperCAmelCase_=True ,UpperCAmelCase_=None ,UpperCAmelCase_=True ,UpperCAmelCase_=None ,UpperCAmelCase_=True ,UpperCAmelCase_=[0.5, 0.5, 0.5] ,UpperCAmelCase_=[0.5, 0.5, 0.5] ,): _lowercase : Tuple = size if size is not None else {"""shortest_edge""": 18} _lowercase : int = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} _lowercase : Optional[Any] = parent _lowercase : Tuple = batch_size _lowercase : Optional[int] = num_channels _lowercase : Tuple = image_size _lowercase : Dict = min_resolution _lowercase : int = max_resolution _lowercase : Optional[int] = do_resize _lowercase : Any = size _lowercase : str = do_center_crop _lowercase : Optional[Any] = crop_size _lowercase : Union[str, Any] = do_normalize _lowercase : Union[str, Any] = image_mean _lowercase : Tuple = image_std def lowerCamelCase__ ( self ): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "do_center_crop": self.do_center_crop, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class UpperCamelCase ( snake_case , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : str = LevitImageProcessor if is_vision_available() else None def lowerCamelCase__ ( self ): _lowercase : Tuple = LevitImageProcessingTester(self ) @property def lowerCamelCase__ ( self ): return self.image_processor_tester.prepare_image_processor_dict() def lowerCamelCase__ ( self ): _lowercase : Any = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCAmelCase_ ,"""image_mean""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ ,"""image_std""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ ,"""do_normalize""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ ,"""do_resize""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ ,"""do_center_crop""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ ,"""size""" ) ) def lowerCamelCase__ ( self ): _lowercase : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{"""shortest_edge""": 18} ) self.assertEqual(image_processor.crop_size ,{"""height""": 18, """width""": 18} ) _lowercase : Any = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 ) self.assertEqual(image_processor.size ,{"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size ,{"""height""": 84, """width""": 84} ) def lowerCamelCase__ ( self ): pass def lowerCamelCase__ ( self ): # Initialize image_processing _lowercase : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _lowercase : str = prepare_image_inputs(self.image_processor_tester ,equal_resolution=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ ,Image.Image ) # Test not batched input _lowercase : str = 
image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched _lowercase : Optional[int] = image_processing(UpperCAmelCase_ ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) def lowerCamelCase__ ( self ): # Initialize image_processing _lowercase : int = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _lowercase : str = prepare_image_inputs(self.image_processor_tester ,equal_resolution=UpperCAmelCase_ ,numpify=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ ,np.ndarray ) # Test not batched input _lowercase : List[Any] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched _lowercase : Optional[Any] = image_processing(UpperCAmelCase_ ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) def lowerCamelCase__ ( self ): # Initialize image_processing _lowercase : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _lowercase : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=UpperCAmelCase_ ,torchify=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ ,torch.Tensor ) # Test not batched input _lowercase : str = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched _lowercase : int = image_processing(UpperCAmelCase_ ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,)
336
"""simple docstring""" from .configuration_bert_masked import MaskedBertConfig from .modeling_bert_masked import ( MaskedBertForMultipleChoice, MaskedBertForQuestionAnswering, MaskedBertForSequenceClassification, MaskedBertForTokenClassification, MaskedBertModel, ) from .modules import *
336
1
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class UpperCamelCase ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self ,UpperCAmelCase_ ): for model_result in results.values(): for batch_size, sequence_length in zip(model_result["""bs"""] ,model_result["""ss"""] ): _lowercase : Union[str, Any] = model_result["""result"""][batch_size][sequence_length] self.assertIsNotNone(UpperCAmelCase_ ) def lowerCamelCase__ ( self ): _lowercase : Optional[int] = """sshleifer/tiny-gpt2""" _lowercase : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=UpperCAmelCase_ ,inference=UpperCAmelCase_ ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=UpperCAmelCase_ ,multi_process=UpperCAmelCase_ ,) _lowercase : Any = TensorFlowBenchmark(UpperCAmelCase_ ) _lowercase : str = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def lowerCamelCase__ ( self ): _lowercase : int = """sgugger/tiny-distilbert-classification""" _lowercase : Optional[int] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=UpperCAmelCase_ ,inference=UpperCAmelCase_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=UpperCAmelCase_ ,only_pretrain_model=UpperCAmelCase_ ,) _lowercase : List[Any] = TensorFlowBenchmark(UpperCAmelCase_ ) _lowercase : str = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def lowerCamelCase__ ( self ): _lowercase : List[str] = """sshleifer/tiny-gpt2""" _lowercase : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=UpperCAmelCase_ ,inference=UpperCAmelCase_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=UpperCAmelCase_ ,) _lowercase : List[str] = TensorFlowBenchmark(UpperCAmelCase_ ) _lowercase : Optional[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def lowerCamelCase__ ( self ): _lowercase : List[Any] = """sshleifer/tiny-gpt2""" _lowercase : str = AutoConfig.from_pretrained(UpperCAmelCase_ ) _lowercase : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=UpperCAmelCase_ ,inference=UpperCAmelCase_ ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=UpperCAmelCase_ ,multi_process=UpperCAmelCase_ ,) _lowercase : str = TensorFlowBenchmark(UpperCAmelCase_ ,[config] ) _lowercase : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def lowerCamelCase__ ( self ): _lowercase : Optional[int] = """sshleifer/tiny-gpt2""" _lowercase : str = AutoConfig.from_pretrained(UpperCAmelCase_ ) _lowercase : List[str] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=UpperCAmelCase_ ,inference=UpperCAmelCase_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=UpperCAmelCase_ ,) _lowercase : Union[str, Any] = TensorFlowBenchmark(UpperCAmelCase_ ,[config] ) _lowercase : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) 
def lowerCamelCase__ ( self ): _lowercase : Optional[Any] = """sshleifer/tiny-gpt2""" _lowercase : Union[str, Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=UpperCAmelCase_ ,inference=UpperCAmelCase_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=UpperCAmelCase_ ,) _lowercase : Optional[int] = TensorFlowBenchmark(UpperCAmelCase_ ) _lowercase : int = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def lowerCamelCase__ ( self ): _lowercase : Tuple = """sshleifer/tiny-gpt2""" _lowercase : Tuple = AutoConfig.from_pretrained(UpperCAmelCase_ ) _lowercase : List[str] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=UpperCAmelCase_ ,inference=UpperCAmelCase_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=UpperCAmelCase_ ,) _lowercase : Dict = TensorFlowBenchmark(UpperCAmelCase_ ,[config] ) _lowercase : str = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def lowerCamelCase__ ( self ): _lowercase : Tuple = """patrickvonplaten/t5-tiny-random""" _lowercase : Tuple = AutoConfig.from_pretrained(UpperCAmelCase_ ) _lowercase : Dict = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=UpperCAmelCase_ ,inference=UpperCAmelCase_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=UpperCAmelCase_ ,) _lowercase : str = TensorFlowBenchmark(UpperCAmelCase_ ,configs=[config] ) _lowercase : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 ,"""Cannot do xla on CPU.""" ) def lowerCamelCase__ ( self ): _lowercase : Any = """sshleifer/tiny-gpt2""" _lowercase : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=UpperCAmelCase_ ,inference=UpperCAmelCase_ ,sequence_lengths=[8] ,batch_sizes=[1] ,use_xla=UpperCAmelCase_ ,multi_process=UpperCAmelCase_ ,) _lowercase : List[str] = TensorFlowBenchmark(UpperCAmelCase_ ) _lowercase : Optional[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def lowerCamelCase__ ( self ): _lowercase : Optional[int] = """sshleifer/tiny-gpt2""" with tempfile.TemporaryDirectory() as tmp_dir: _lowercase : str = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,inference=UpperCAmelCase_ ,save_to_csv=UpperCAmelCase_ ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(UpperCAmelCase_ ,"""inf_time.csv""" ) ,inference_memory_csv_file=os.path.join(UpperCAmelCase_ ,"""inf_mem.csv""" ) ,env_info_csv_file=os.path.join(UpperCAmelCase_ ,"""env.csv""" ) ,multi_process=UpperCAmelCase_ ,) _lowercase : Tuple = TensorFlowBenchmark(UpperCAmelCase_ ) benchmark.run() self.assertTrue(Path(os.path.join(UpperCAmelCase_ ,"""inf_time.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(UpperCAmelCase_ ,"""inf_mem.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(UpperCAmelCase_ ,"""env.csv""" ) ).exists() ) def lowerCamelCase__ ( self ): _lowercase : Dict = """sshleifer/tiny-gpt2""" def _check_summary_is_not_empty(UpperCAmelCase_ ): self.assertTrue(hasattr(UpperCAmelCase_ ,"""sequential""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ ,"""cumulative""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ ,"""current""" ) ) 
self.assertTrue(hasattr(UpperCAmelCase_ ,"""total""" ) ) with tempfile.TemporaryDirectory() as tmp_dir: _lowercase : Optional[int] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,inference=UpperCAmelCase_ ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(UpperCAmelCase_ ,"""log.txt""" ) ,log_print=UpperCAmelCase_ ,trace_memory_line_by_line=UpperCAmelCase_ ,eager_mode=UpperCAmelCase_ ,multi_process=UpperCAmelCase_ ,) _lowercase : Optional[Any] = TensorFlowBenchmark(UpperCAmelCase_ ) _lowercase : int = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(UpperCAmelCase_ ,"""log.txt""" ) ).exists() )
336
"""simple docstring""" def __SCREAMING_SNAKE_CASE ( ): return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )] UpperCAmelCase: Any = generate_large_matrix() UpperCAmelCase: Dict = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): assert all(row == sorted(__UpperCAmelCase , reverse=__UpperCAmelCase ) for row in grid ) assert all(list(__UpperCAmelCase ) == sorted(__UpperCAmelCase , reverse=__UpperCAmelCase ) for col in zip(*__UpperCAmelCase ) ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase : Tuple = 0 _lowercase : List[Any] = len(__UpperCAmelCase ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: _lowercase : Tuple = (left + right) // 2 _lowercase : List[Any] = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: _lowercase : Dict = mid + 1 else: _lowercase : Dict = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. return len(__UpperCAmelCase ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase : Any = 0 _lowercase : Optional[int] = len(grid[0] ) for i in range(len(__UpperCAmelCase ) ): _lowercase : Union[str, Any] = find_negative_index(grid[i][:bound] ) total += bound return (len(__UpperCAmelCase ) * len(grid[0] )) - total def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): return len([number for row in grid for number in row if number < 0] ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase : Tuple = 0 for row in grid: for i, number in enumerate(__UpperCAmelCase ): if number < 0: total += len(__UpperCAmelCase ) - i break return total def __SCREAMING_SNAKE_CASE ( ): from timeit import timeit print("""Running benchmarks""" ) _lowercase : Tuple = ( """from __main__ import count_negatives_binary_search, """ """count_negatives_brute_force, count_negatives_brute_force_with_break, grid""" ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): _lowercase : Dict = timeit(F"""{func}(grid=grid)""" , setup=__UpperCAmelCase , number=500 ) print(F"""{func}() took {time:0.4f} seconds""" ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
336
1
"""simple docstring""" import time from contextlib import contextmanager from pathlib import Path import pytest import requests from huggingface_hub.hf_api import HfApi, HfFolder UpperCAmelCase: Optional[Any] = """__DUMMY_TRANSFORMERS_USER__""" UpperCAmelCase: Tuple = """Dummy User""" UpperCAmelCase: Union[str, Any] = """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""" UpperCAmelCase: Tuple = """https://hub-ci.huggingface.co""" UpperCAmelCase: List[Any] = CI_HUB_ENDPOINT + """/datasets/{repo_id}/resolve/{revision}/{path}""" UpperCAmelCase: int = CI_HUB_ENDPOINT + """/{repo_id}/resolve/{revision}/{filename}""" UpperCAmelCase: Dict = Path("""~/.huggingface/hub_ci_token""").expanduser() @pytest.fixture def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): monkeypatch.setattr( """huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , __UpperCAmelCase ) @pytest.fixture def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , __UpperCAmelCase ) monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , __UpperCAmelCase ) @pytest.fixture def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , __UpperCAmelCase ) @pytest.fixture def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ): HfFolder.save_token(__UpperCAmelCase ) yield HfFolder.delete_token() @pytest.fixture(scope="""session""" ) def __SCREAMING_SNAKE_CASE ( ): return HfApi(endpoint=__UpperCAmelCase ) @pytest.fixture(scope="""session""" ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase : int = HfFolder.get_token() HfFolder.save_token(__UpperCAmelCase ) yield CI_HUB_USER_TOKEN if previous_token is not None: HfFolder.save_token(__UpperCAmelCase ) @pytest.fixture def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): def _cleanup_repo(__UpperCAmelCase ): hf_api.delete_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" ) return _cleanup_repo @pytest.fixture def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): @contextmanager def _temporary_repo(__UpperCAmelCase ): try: yield repo_id finally: cleanup_repo(__UpperCAmelCase ) return _temporary_repo @pytest.fixture(scope="""session""" ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): _lowercase : Optional[int] = F"""repo_txt_data-{int(time.time() * 1_0E3 )}""" _lowercase : Optional[Any] = F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" , private=__UpperCAmelCase ) hf_api.upload_file( token=__UpperCAmelCase , path_or_fileobj=str(__UpperCAmelCase ) , path_in_repo="""data/text_data.txt""" , repo_id=__UpperCAmelCase , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): return hf_private_dataset_repo_txt_data_ @pytest.fixture(scope="""session""" ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): _lowercase : Optional[int] = F"""repo_zipped_txt_data-{int(time.time() * 1_0E3 )}""" _lowercase : str = F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" , private=__UpperCAmelCase ) hf_api.upload_file( token=__UpperCAmelCase , path_or_fileobj=str(__UpperCAmelCase ) , 
path_in_repo="""data.zip""" , repo_id=__UpperCAmelCase , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): return hf_private_dataset_repo_zipped_txt_data_ @pytest.fixture(scope="""session""" ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): _lowercase : str = F"""repo_zipped_img_data-{int(time.time() * 1_0E3 )}""" _lowercase : List[Any] = F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" , private=__UpperCAmelCase ) hf_api.upload_file( token=__UpperCAmelCase , path_or_fileobj=str(__UpperCAmelCase ) , path_in_repo="""data.zip""" , repo_id=__UpperCAmelCase , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): return hf_private_dataset_repo_zipped_img_data_
336
"""simple docstring""" import re from filelock import FileLock try: import nltk UpperCAmelCase: List[str] = True except (ImportError, ModuleNotFoundError): UpperCAmelCase: int = False if NLTK_AVAILABLE: with FileLock(""".lock""") as lock: nltk.download("""punkt""", quiet=True) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): re.sub("""<n>""" , """""" , __UpperCAmelCase ) # remove pegasus newline char assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)" return "\n".join(nltk.sent_tokenize(__UpperCAmelCase ) )
336
1
"""simple docstring""" from ..utils import DummyObject, requires_backends class UpperCamelCase ( metaclass=snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = ["flax"] def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): requires_backends(self ,["""flax"""] ) @classmethod def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): requires_backends(cls ,["""flax"""] ) @classmethod def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): requires_backends(cls ,["""flax"""] ) class UpperCamelCase ( metaclass=snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = ["flax"] def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): requires_backends(self ,["""flax"""] ) @classmethod def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): requires_backends(cls ,["""flax"""] ) @classmethod def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): requires_backends(cls ,["""flax"""] ) class UpperCamelCase ( metaclass=snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = ["flax"] def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): requires_backends(self ,["""flax"""] ) @classmethod def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): requires_backends(cls ,["""flax"""] ) @classmethod def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): requires_backends(cls ,["""flax"""] ) class UpperCamelCase ( metaclass=snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = ["flax"] def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): requires_backends(self ,["""flax"""] ) @classmethod def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): requires_backends(cls ,["""flax"""] ) @classmethod def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): requires_backends(cls ,["""flax"""] ) class UpperCamelCase ( metaclass=snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = ["flax"] def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): requires_backends(self ,["""flax"""] ) @classmethod def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): requires_backends(cls ,["""flax"""] ) @classmethod def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): requires_backends(cls ,["""flax"""] ) class UpperCamelCase ( metaclass=snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = ["flax"] def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): requires_backends(self ,["""flax"""] ) @classmethod def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): requires_backends(cls ,["""flax"""] ) @classmethod def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): requires_backends(cls ,["""flax"""] ) class UpperCamelCase ( metaclass=snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = ["flax"] def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): requires_backends(self ,["""flax"""] ) @classmethod def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): requires_backends(cls ,["""flax"""] ) @classmethod def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): requires_backends(cls ,["""flax"""] ) class UpperCamelCase ( metaclass=snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = ["flax"] def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): requires_backends(self ,["""flax"""] ) @classmethod def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): requires_backends(cls 
,["""flax"""] ) @classmethod def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): requires_backends(cls ,["""flax"""] ) class UpperCamelCase ( metaclass=snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = ["flax"] def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): requires_backends(self ,["""flax"""] ) @classmethod def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): requires_backends(cls ,["""flax"""] ) @classmethod def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): requires_backends(cls ,["""flax"""] ) class UpperCamelCase ( metaclass=snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = ["flax"] def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): requires_backends(self ,["""flax"""] ) @classmethod def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): requires_backends(cls ,["""flax"""] ) @classmethod def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): requires_backends(cls ,["""flax"""] ) class UpperCamelCase ( metaclass=snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = ["flax"] def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): requires_backends(self ,["""flax"""] ) @classmethod def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): requires_backends(cls ,["""flax"""] ) @classmethod def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): requires_backends(cls ,["""flax"""] ) class UpperCamelCase ( metaclass=snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = ["flax"] def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): requires_backends(self ,["""flax"""] ) @classmethod def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): requires_backends(cls ,["""flax"""] ) @classmethod def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): requires_backends(cls ,["""flax"""] ) class UpperCamelCase ( metaclass=snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = ["flax"] def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): requires_backends(self ,["""flax"""] ) @classmethod def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): requires_backends(cls ,["""flax"""] ) @classmethod def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): requires_backends(cls ,["""flax"""] )
336
"""simple docstring""" import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase=0.9_9_9 , __UpperCAmelCase="cosine" , ): if alpha_transform_type == "cosine": def alpha_bar_fn(__UpperCAmelCase ): return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(__UpperCAmelCase ): return math.exp(t * -1_2.0 ) else: raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) _lowercase : str = [] for i in range(__UpperCAmelCase ): _lowercase : Any = i / num_diffusion_timesteps _lowercase : int = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(__UpperCAmelCase ) / alpha_bar_fn(__UpperCAmelCase ) , __UpperCAmelCase ) ) return torch.tensor(__UpperCAmelCase , dtype=torch.floataa ) class UpperCamelCase ( snake_case , snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = [e.name for e in KarrasDiffusionSchedulers] SCREAMING_SNAKE_CASE_ : str = 2 @register_to_config def __init__( self ,UpperCAmelCase_ = 10_00 ,UpperCAmelCase_ = 0.00085 ,UpperCAmelCase_ = 0.012 ,UpperCAmelCase_ = "linear" ,UpperCAmelCase_ = None ,UpperCAmelCase_ = "epsilon" ,UpperCAmelCase_ = "linspace" ,UpperCAmelCase_ = 0 ,): if trained_betas is not None: _lowercase : str = torch.tensor(UpperCAmelCase_ ,dtype=torch.floataa ) elif beta_schedule == "linear": _lowercase : Optional[Any] = torch.linspace(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. _lowercase : Any = ( torch.linspace(beta_start**0.5 ,beta_end**0.5 ,UpperCAmelCase_ ,dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule _lowercase : Any = betas_for_alpha_bar(UpperCAmelCase_ ) else: raise NotImplementedError(f"""{beta_schedule} does is not implemented for {self.__class__}""" ) _lowercase : Tuple = 1.0 - self.betas _lowercase : Dict = torch.cumprod(self.alphas ,dim=0 ) # set all values self.set_timesteps(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_=None ): if schedule_timesteps is None: _lowercase : Optional[int] = self.timesteps _lowercase : Union[str, Any] = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter ) == 0: _lowercase : Optional[Any] = 1 if len(UpperCAmelCase_ ) > 1 else 0 else: _lowercase : Dict = timestep.cpu().item() if torch.is_tensor(UpperCAmelCase_ ) else timestep _lowercase : List[str] = self._index_counter[timestep_int] return indices[pos].item() @property def lowerCamelCase__ ( self ): # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,): _lowercase : str = self.index_for_timestep(UpperCAmelCase_ ) if self.state_in_first_order: _lowercase : Optional[Any] = self.sigmas[step_index] else: _lowercase : Dict = self.sigmas_interpol[step_index] _lowercase : Optional[int] = sample / ((sigma**2 + 1) ** 0.5) return sample def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,): _lowercase : List[str] = num_inference_steps _lowercase : Dict = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": _lowercase : Dict = np.linspace(0 ,num_train_timesteps - 1 ,UpperCAmelCase_ ,dtype=UpperCAmelCase_ )[::-1].copy() elif self.config.timestep_spacing == "leading": _lowercase : Union[str, Any] = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 _lowercase : str = (np.arange(0 ,UpperCAmelCase_ ) * step_ratio).round()[::-1].copy().astype(UpperCAmelCase_ ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": _lowercase : str = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 _lowercase : Optional[int] = (np.arange(UpperCAmelCase_ ,0 ,-step_ratio )).round().copy().astype(UpperCAmelCase_ ) timesteps -= 1 else: raise ValueError( f"""{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" ) _lowercase : Union[str, Any] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) _lowercase : Optional[Any] = torch.from_numpy(np.log(UpperCAmelCase_ ) ).to(UpperCAmelCase_ ) _lowercase : List[str] = np.interp(UpperCAmelCase_ ,np.arange(0 ,len(UpperCAmelCase_ ) ) ,UpperCAmelCase_ ) _lowercase : int = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) _lowercase : Any = torch.from_numpy(UpperCAmelCase_ ).to(device=UpperCAmelCase_ ) # interpolate sigmas _lowercase : List[str] = sigmas.log().lerp(sigmas.roll(1 ).log() ,0.5 ).exp() _lowercase : Union[str, Any] = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] ) _lowercase : Tuple = torch.cat( [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] ) if str(UpperCAmelCase_ ).startswith("""mps""" ): # mps does not support float64 _lowercase : Tuple = torch.from_numpy(UpperCAmelCase_ ).to(UpperCAmelCase_ ,dtype=torch.floataa ) else: _lowercase : str = torch.from_numpy(UpperCAmelCase_ ).to(UpperCAmelCase_ ) # interpolate timesteps _lowercase : int = self.sigma_to_t(UpperCAmelCase_ ).to(UpperCAmelCase_ ,dtype=timesteps.dtype ) _lowercase : Union[str, Any] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) ,dim=-1 ).flatten() _lowercase : str = torch.cat([timesteps[:1], interleaved_timesteps] ) _lowercase : List[Any] = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter _lowercase : Optional[Any] = defaultdict(UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ): # get log sigma _lowercase : Optional[Any] = sigma.log() # get distribution _lowercase : Optional[int] = log_sigma - self.log_sigmas[:, None] # get sigmas range _lowercase : Tuple = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 ) _lowercase : List[Any] = low_idx + 1 _lowercase : int = self.log_sigmas[low_idx] _lowercase : Any = self.log_sigmas[high_idx] # interpolate sigmas _lowercase : Any = (low - log_sigma) / (low - high) _lowercase : Dict = w.clamp(0 ,1 ) # transform interpolation to time range _lowercase : List[str] = (1 - w) * low_idx + w * high_idx _lowercase : Optional[int] = t.view(sigma.shape ) return t @property def lowerCamelCase__ ( self ): return self.sample is None def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = True ,): _lowercase : Optional[int] = self.index_for_timestep(UpperCAmelCase_ ) # advance index counter by 1 _lowercase : str = timestep.cpu().item() if torch.is_tensor(UpperCAmelCase_ ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: _lowercase : Any = self.sigmas[step_index] _lowercase : Any = self.sigmas_interpol[step_index + 1] _lowercase : Tuple = self.sigmas[step_index + 1] else: # 2nd order / KDPM2's method _lowercase : Union[str, Any] = self.sigmas[step_index - 1] _lowercase : int = self.sigmas_interpol[step_index] _lowercase : Tuple = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API _lowercase : Any = 0 _lowercase : int = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. 
compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": _lowercase : Union[str, Any] = sigma_hat if self.state_in_first_order else sigma_interpol _lowercase : Optional[Any] = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": _lowercase : str = sigma_hat if self.state_in_first_order else sigma_interpol _lowercase : List[str] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": raise NotImplementedError("""prediction_type not implemented yet: sample""" ) else: raise ValueError( f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order _lowercase : List[str] = (sample - pred_original_sample) / sigma_hat # 3. delta timestep _lowercase : Any = sigma_interpol - sigma_hat # store for 2nd order step _lowercase : List[Any] = sample else: # DPM-Solver-2 # 2. Convert to an ODE derivative for 2nd order _lowercase : Optional[Any] = (sample - pred_original_sample) / sigma_interpol # 3. delta timestep _lowercase : Optional[Any] = sigma_next - sigma_hat _lowercase : Any = self.sample _lowercase : Optional[int] = None _lowercase : str = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,): # Make sure sigmas and timesteps have the same device and dtype as original_samples _lowercase : int = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(UpperCAmelCase_ ): # mps does not support float64 _lowercase : str = self.timesteps.to(original_samples.device ,dtype=torch.floataa ) _lowercase : Any = timesteps.to(original_samples.device ,dtype=torch.floataa ) else: _lowercase : List[Any] = self.timesteps.to(original_samples.device ) _lowercase : Union[str, Any] = timesteps.to(original_samples.device ) _lowercase : List[Any] = [self.index_for_timestep(UpperCAmelCase_ ,UpperCAmelCase_ ) for t in timesteps] _lowercase : Optional[Any] = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): _lowercase : List[Any] = sigma.unsqueeze(-1 ) _lowercase : int = original_samples + noise * sigma return noisy_samples def __len__( self ): return self.config.num_train_timesteps
336
1
"""simple docstring""" import json from typing import Iterator, List, Union from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers from tokenizers.implementations.base_tokenizer import BaseTokenizer from tokenizers.models import Unigram from tokenizers.processors import TemplateProcessing class UpperCamelCase ( snake_case ): """simple docstring""" def __init__( self ,UpperCAmelCase_ = "▁" ,UpperCAmelCase_ = True ,UpperCAmelCase_ = "<unk>" ,UpperCAmelCase_ = "</s>" ,UpperCAmelCase_ = "<pad>" ,): _lowercase : List[Any] = { """pad""": {"""id""": 0, """token""": pad_token}, """eos""": {"""id""": 1, """token""": eos_token}, """unk""": {"""id""": 2, """token""": unk_token}, } _lowercase : List[str] = [None] * len(self.special_tokens ) for token_dict in self.special_tokens.values(): _lowercase : str = token_dict["""token"""] _lowercase : Optional[int] = Tokenizer(Unigram() ) _lowercase : str = normalizers.Sequence( [ normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(""" {2,}""" ) ,""" """ ), normalizers.Lowercase(), ] ) _lowercase : Optional[int] = pre_tokenizers.Sequence( [ pre_tokenizers.Metaspace(replacement=UpperCAmelCase_ ,add_prefix_space=UpperCAmelCase_ ), pre_tokenizers.Digits(individual_digits=UpperCAmelCase_ ), pre_tokenizers.Punctuation(), ] ) _lowercase : Tuple = decoders.Metaspace(replacement=UpperCAmelCase_ ,add_prefix_space=UpperCAmelCase_ ) _lowercase : List[Any] = TemplateProcessing( single=f"""$A {self.special_tokens["eos"]["token"]}""" ,special_tokens=[(self.special_tokens["""eos"""]["""token"""], self.special_tokens["""eos"""]["""id"""])] ,) _lowercase : Union[str, Any] = { """model""": """SentencePieceUnigram""", """replacement""": replacement, """add_prefix_space""": add_prefix_space, } super().__init__(UpperCAmelCase_ ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = 80_00 ,UpperCAmelCase_ = True ,): _lowercase : Any = trainers.UnigramTrainer( vocab_size=UpperCAmelCase_ ,special_tokens=self.special_tokens_list ,show_progress=UpperCAmelCase_ ,) if isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ): _lowercase : Any = [files] self._tokenizer.train(UpperCAmelCase_ ,trainer=UpperCAmelCase_ ) self.add_unk_id() def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = 80_00 ,UpperCAmelCase_ = True ,): _lowercase : Union[str, Any] = trainers.UnigramTrainer( vocab_size=UpperCAmelCase_ ,special_tokens=self.special_tokens_list ,show_progress=UpperCAmelCase_ ,) self._tokenizer.train_from_iterator(UpperCAmelCase_ ,trainer=UpperCAmelCase_ ) self.add_unk_id() def lowerCamelCase__ ( self ): _lowercase : List[Any] = json.loads(self._tokenizer.to_str() ) _lowercase : Optional[Any] = self.special_tokens["""unk"""]["""id"""] _lowercase : str = Tokenizer.from_str(json.dumps(UpperCAmelCase_ ) )
336
"""simple docstring""" import pprint import requests UpperCAmelCase: Tuple = """https://zenquotes.io/api""" def __SCREAMING_SNAKE_CASE ( ): return requests.get(API_ENDPOINT_URL + """/today""" ).json() def __SCREAMING_SNAKE_CASE ( ): return requests.get(API_ENDPOINT_URL + """/random""" ).json() if __name__ == "__main__": UpperCAmelCase: int = random_quotes() pprint.pprint(response)
336
1
"""simple docstring""" import jax.numpy as jnp from ...utils import logging from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel from .configuration_mta import MTaConfig UpperCAmelCase: Any = logging.get_logger(__name__) UpperCAmelCase: Optional[int] = """T5Config""" def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): _lowercase : List[str] = jnp.zeros_like(__UpperCAmelCase ) _lowercase : int = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] ) _lowercase : str = shifted_input_ids.at[:, 0].set(__UpperCAmelCase ) _lowercase : Union[str, Any] = jnp.where(shifted_input_ids == -100 , __UpperCAmelCase , __UpperCAmelCase ) return shifted_input_ids class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = "mt5" SCREAMING_SNAKE_CASE_ : int = MTaConfig class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = "mt5" SCREAMING_SNAKE_CASE_ : str = MTaConfig class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : str = "mt5" SCREAMING_SNAKE_CASE_ : List[Any] = MTaConfig
336
"""simple docstring""" from __future__ import annotations from typing import TypedDict class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : str SCREAMING_SNAKE_CASE_ : int def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise TypeError("""The parameter s type must be str.""" ) return [s[i:] + s[:i] for i in range(len(__UpperCAmelCase ) )] def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise TypeError("""The parameter s type must be str.""" ) if not s: raise ValueError("""The parameter s must not be empty.""" ) _lowercase : Tuple = all_rotations(__UpperCAmelCase ) rotations.sort() # sort the list of rotations in alphabetically order # make a string composed of the last char of each rotation _lowercase : BWTTransformDict = { "bwt_string": "".join([word[-1] for word in rotations] ), "idx_original_string": rotations.index(__UpperCAmelCase ), } return response def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ): if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise TypeError("""The parameter bwt_string type must be str.""" ) if not bwt_string: raise ValueError("""The parameter bwt_string must not be empty.""" ) try: _lowercase : Optional[Any] = int(__UpperCAmelCase ) except ValueError: raise TypeError( """The parameter idx_original_string type must be int or passive""" """ of cast to int.""" ) if idx_original_string < 0: raise ValueError("""The parameter idx_original_string must not be lower than 0.""" ) if idx_original_string >= len(__UpperCAmelCase ): raise ValueError( """The parameter idx_original_string must be lower than""" """ len(bwt_string).""" ) _lowercase : int = [""""""] * len(__UpperCAmelCase ) for _ in range(len(__UpperCAmelCase ) ): for i in range(len(__UpperCAmelCase ) ): _lowercase : Union[str, Any] = bwt_string[i] + ordered_rotations[i] ordered_rotations.sort() return ordered_rotations[idx_original_string] if __name__ == "__main__": UpperCAmelCase: Optional[int] = """Provide a string that I will generate its BWT transform: """ UpperCAmelCase: int = input(entry_msg).strip() UpperCAmelCase: List[str] = bwt_transform(s) print( F'Burrows Wheeler transform for string \'{s}\' results ' F'in \'{result["bwt_string"]}\'' ) UpperCAmelCase: Union[str, Any] = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""]) print( F'Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' ' F'we get original string \'{original_string}\'' )
336
1
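A readable sketch of the forward transform the row above implements (illustrative only; the reverse function relies on the returned index to round-trip):

def bwt(s: str) -> tuple[str, int]:
    # Sort all cyclic rotations; the transform is the last column plus the
    # index of the original string among the sorted rotations.
    rotations = sorted(s[i:] + s[:i] for i in range(len(s)))
    return "".join(r[-1] for r in rotations), rotations.index(s)

print(bwt("banana"))  # ('nnbaaa', 3)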
"""simple docstring""" import re from filelock import FileLock try: import nltk UpperCAmelCase: List[str] = True except (ImportError, ModuleNotFoundError): UpperCAmelCase: int = False if NLTK_AVAILABLE: with FileLock(""".lock""") as lock: nltk.download("""punkt""", quiet=True) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): re.sub("""<n>""" , """""" , __UpperCAmelCase ) # remove pegasus newline char assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)" return "\n".join(nltk.sent_tokenize(__UpperCAmelCase ) )
336
"""simple docstring""" from __future__ import annotations from itertools import permutations from random import randint from timeit import repeat def __SCREAMING_SNAKE_CASE ( ): _lowercase : Dict = [randint(-1000 , 1000 ) for i in range(10 )] _lowercase : Tuple = randint(-5000 , 5000 ) return (arr, r) UpperCAmelCase: int = make_dataset() def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ): for triplet in permutations(__UpperCAmelCase , 3 ): if sum(__UpperCAmelCase ) == target: return tuple(sorted(__UpperCAmelCase ) ) return (0, 0, 0) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ): arr.sort() _lowercase : Optional[Any] = len(__UpperCAmelCase ) for i in range(n - 1 ): _lowercase , _lowercase : str = i + 1, n - 1 while left < right: if arr[i] + arr[left] + arr[right] == target: return (arr[i], arr[left], arr[right]) elif arr[i] + arr[left] + arr[right] < target: left += 1 elif arr[i] + arr[left] + arr[right] > target: right -= 1 return (0, 0, 0) def __SCREAMING_SNAKE_CASE ( ): _lowercase : Tuple = """ from __main__ import dataset, triplet_sum1, triplet_sum2 """ _lowercase : Union[str, Any] = """ triplet_sum1(*dataset) """ _lowercase : Union[str, Any] = """ triplet_sum2(*dataset) """ _lowercase : Dict = repeat(setup=__UpperCAmelCase , stmt=__UpperCAmelCase , repeat=5 , number=10000 ) _lowercase : Any = repeat(setup=__UpperCAmelCase , stmt=__UpperCAmelCase , repeat=5 , number=10000 ) return (min(__UpperCAmelCase ), min(__UpperCAmelCase )) if __name__ == "__main__": from doctest import testmod testmod() UpperCAmelCase: Any = solution_times() print(F'The time for naive implementation is {times[0]}.') print(F'The time for optimized implementation is {times[1]}.')
336
1
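The optimized function above is the classic sort-plus-two-pointers pattern, O(n^2) against the O(n^3) permutation scan it is benchmarked against. A readable equivalent (a sketch, not the benchmarked code):

def triplet_sum(arr: list[int], target: int) -> tuple[int, int, int]:
    # Fix the smallest element, then close in from both ends of the sorted tail.
    arr = sorted(arr)
    for i in range(len(arr) - 2):
        left, right = i + 1, len(arr) - 1
        while left < right:
            s = arr[i] + arr[left] + arr[right]
            if s == target:
                return arr[i], arr[left], arr[right]
            if s < target:
                left += 1
            else:
                right -= 1
    return 0, 0, 0

print(triplet_sum([13, 29, 7, 23, 5], 35))  # (5, 7, 23)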
"""simple docstring""" import unittest from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class UpperCamelCase : """simple docstring""" @staticmethod def lowerCamelCase__ ( *UpperCAmelCase_ ,**UpperCAmelCase_ ): pass @is_pipeline_test @require_torch @require_vision class UpperCamelCase ( unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ): _lowercase : Tuple = pipeline("""visual-question-answering""" ,model="""hf-internal-testing/tiny-vilt-random-vqa""" ) _lowercase : Tuple = [ { """image""": Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ), """question""": """How many cats are there?""", }, { """image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""", """question""": """How many cats are there?""", }, ] return vqa_pipeline, examples def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ): _lowercase : Union[str, Any] = vqa_pipeline(UpperCAmelCase_ ,top_k=1 ) self.assertEqual( UpperCAmelCase_ ,[ [{"""score""": ANY(UpperCAmelCase_ ), """answer""": ANY(UpperCAmelCase_ )}], [{"""score""": ANY(UpperCAmelCase_ ), """answer""": ANY(UpperCAmelCase_ )}], ] ,) @require_torch def lowerCamelCase__ ( self ): _lowercase : int = pipeline("""visual-question-answering""" ,model="""hf-internal-testing/tiny-vilt-random-vqa""" ) _lowercase : Any = """./tests/fixtures/tests_samples/COCO/000000039769.png""" _lowercase : Any = """How many cats are there?""" _lowercase : str = vqa_pipeline(image=UpperCAmelCase_ ,question="""How many cats are there?""" ,top_k=2 ) self.assertEqual( UpperCAmelCase_ ,[{"""score""": ANY(UpperCAmelCase_ ), """answer""": ANY(UpperCAmelCase_ )}, {"""score""": ANY(UpperCAmelCase_ ), """answer""": ANY(UpperCAmelCase_ )}] ) _lowercase : Optional[int] = vqa_pipeline({"""image""": image, """question""": question} ,top_k=2 ) self.assertEqual( UpperCAmelCase_ ,[{"""score""": ANY(UpperCAmelCase_ ), """answer""": ANY(UpperCAmelCase_ )}, {"""score""": ANY(UpperCAmelCase_ ), """answer""": ANY(UpperCAmelCase_ )}] ) @slow @require_torch def lowerCamelCase__ ( self ): _lowercase : List[Any] = pipeline("""visual-question-answering""" ,model="""dandelin/vilt-b32-finetuned-vqa""" ) _lowercase : Optional[int] = """./tests/fixtures/tests_samples/COCO/000000039769.png""" _lowercase : List[Any] = """How many cats are there?""" _lowercase : Dict = vqa_pipeline(image=UpperCAmelCase_ ,question=UpperCAmelCase_ ,top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase_ ,decimals=4 ) ,[{"""score""": 0.8799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}] ) _lowercase : Union[str, Any] = vqa_pipeline({"""image""": image, """question""": question} ,top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase_ ,decimals=4 ) ,[{"""score""": 0.8799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}] ) _lowercase : List[Any] = vqa_pipeline( [{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] ,top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase_ ,decimals=4 ) ,[[{"""score""": 0.8799, """answer""": """2"""}, {"""score""": 0.296, 
"""answer""": """1"""}]] * 2 ,) @require_tf @unittest.skip("""Visual question answering not implemented in TF""" ) def lowerCamelCase__ ( self ): pass
336
"""simple docstring""" import os from typing import List, Optional, Union from ...image_processing_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType from ..auto import AutoTokenizer class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = ["image_processor", "tokenizer"] SCREAMING_SNAKE_CASE_ : Optional[int] = "BlipImageProcessor" SCREAMING_SNAKE_CASE_ : Tuple = "AutoTokenizer" def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ): super().__init__(UpperCAmelCase_ ,UpperCAmelCase_ ) # add QFormer tokenizer _lowercase : Optional[int] = qformer_tokenizer def __call__( self ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = True ,UpperCAmelCase_ = False ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = 0 ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,): if images is None and text is None: raise ValueError("""You have to specify at least images or text.""" ) _lowercase : List[Any] = BatchFeature() if text is not None: _lowercase : List[str] = self.tokenizer( text=UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,max_length=UpperCAmelCase_ ,stride=UpperCAmelCase_ ,pad_to_multiple_of=UpperCAmelCase_ ,return_attention_mask=UpperCAmelCase_ ,return_overflowing_tokens=UpperCAmelCase_ ,return_special_tokens_mask=UpperCAmelCase_ ,return_offsets_mapping=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ ,return_length=UpperCAmelCase_ ,verbose=UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ,**UpperCAmelCase_ ,) encoding.update(UpperCAmelCase_ ) _lowercase : Dict = self.qformer_tokenizer( text=UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,max_length=UpperCAmelCase_ ,stride=UpperCAmelCase_ ,pad_to_multiple_of=UpperCAmelCase_ ,return_attention_mask=UpperCAmelCase_ ,return_overflowing_tokens=UpperCAmelCase_ ,return_special_tokens_mask=UpperCAmelCase_ ,return_offsets_mapping=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ ,return_length=UpperCAmelCase_ ,verbose=UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ,**UpperCAmelCase_ ,) _lowercase : str = qformer_text_encoding.pop("""input_ids""" ) _lowercase : int = qformer_text_encoding.pop("""attention_mask""" ) if images is not None: _lowercase : Optional[Any] = self.image_processor(UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ) encoding.update(UpperCAmelCase_ ) return encoding def lowerCamelCase__ ( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): return self.tokenizer.batch_decode(*UpperCAmelCase_ ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): return self.tokenizer.decode(*UpperCAmelCase_ ,**UpperCAmelCase_ ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def lowerCamelCase__ ( self ): _lowercase : Union[str, Any] = self.tokenizer.model_input_names _lowercase : int = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,**UpperCAmelCase_ ): if 
os.path.isfile(UpperCAmelCase_ ): raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" ) os.makedirs(UpperCAmelCase_ ,exist_ok=UpperCAmelCase_ ) _lowercase : Any = os.path.join(UpperCAmelCase_ ,"""qformer_tokenizer""" ) self.qformer_tokenizer.save_pretrained(UpperCAmelCase_ ) return super().save_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ ) @classmethod def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ): _lowercase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCAmelCase_ ,subfolder="""qformer_tokenizer""" ) _lowercase : Any = cls._get_arguments_from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ ) args.append(UpperCAmelCase_ ) return cls(*UpperCAmelCase_ )
336
1
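A hypothetical usage sketch for the processor above; the checkpoint name is an assumption, and output key names may differ by release. Note how the __call__ above produces both regular and Q-Former token ids alongside pixel values:

from PIL import Image
from transformers import InstructBlipProcessor

# Checkpoint name assumed for illustration.
processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
image = Image.new("RGB", (224, 224))
inputs = processor(images=image, text="Describe the image.", return_tensors="pt")
print(sorted(inputs.keys()))
# expect: attention_mask, input_ids, pixel_values, qformer_attention_mask, qformer_input_ids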
"""simple docstring""" import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) UpperCAmelCase: List[Any] = logging.getLogger(__name__) @dataclass(frozen=snake_case ) class UpperCamelCase : """simple docstring""" SCREAMING_SNAKE_CASE_ : str SCREAMING_SNAKE_CASE_ : str SCREAMING_SNAKE_CASE_ : Optional[str] = None SCREAMING_SNAKE_CASE_ : Optional[str] = None SCREAMING_SNAKE_CASE_ : Optional[str] = None @dataclass(frozen=snake_case ) class UpperCamelCase : """simple docstring""" SCREAMING_SNAKE_CASE_ : List[int] SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None SCREAMING_SNAKE_CASE_ : Optional[Union[int, float]] = None SCREAMING_SNAKE_CASE_ : Optional[int] = None if is_torch_available(): import torch from torch.utils.data import Dataset class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[InputFeatures] def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_=False ,UpperCAmelCase_ = False ,): _lowercase : List[Any] = hans_processors[task]() _lowercase : Union[str, Any] = os.path.join( UpperCAmelCase_ ,"""cached_{}_{}_{}_{}""".format( """dev""" if evaluate else """train""" ,tokenizer.__class__.__name__ ,str(UpperCAmelCase_ ) ,UpperCAmelCase_ ,) ,) _lowercase : str = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) _lowercase , _lowercase : List[Any] = label_list[2], label_list[1] _lowercase : Optional[int] = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
_lowercase : int = cached_features_file + """.lock""" with FileLock(UpperCAmelCase_ ): if os.path.exists(UpperCAmelCase_ ) and not overwrite_cache: logger.info(f"""Loading features from cached file {cached_features_file}""" ) _lowercase : str = torch.load(UpperCAmelCase_ ) else: logger.info(f"""Creating features from dataset file at {data_dir}""" ) _lowercase : int = ( processor.get_dev_examples(UpperCAmelCase_ ) if evaluate else processor.get_train_examples(UpperCAmelCase_ ) ) logger.info("""Training examples: %s""" ,len(UpperCAmelCase_ ) ) _lowercase : Dict = hans_convert_examples_to_features(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ) logger.info("""Saving features into cached file %s""" ,UpperCAmelCase_ ) torch.save(self.features ,UpperCAmelCase_ ) def __len__( self ): return len(self.features ) def __getitem__( self ,UpperCAmelCase_ ): return self.features[i] def lowerCamelCase__ ( self ): return self.label_list if is_tf_available(): import tensorflow as tf class UpperCamelCase : """simple docstring""" SCREAMING_SNAKE_CASE_ : List[InputFeatures] def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = 1_28 ,UpperCAmelCase_=False ,UpperCAmelCase_ = False ,): _lowercase : Any = hans_processors[task]() _lowercase : str = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) _lowercase , _lowercase : str = label_list[2], label_list[1] _lowercase : Dict = label_list _lowercase : List[str] = processor.get_dev_examples(UpperCAmelCase_ ) if evaluate else processor.get_train_examples(UpperCAmelCase_ ) _lowercase : Optional[int] = hans_convert_examples_to_features(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) ,desc="""convert examples to features""" ): if ex_index % 1_00_00 == 0: logger.info("""Writing example %d of %d""" % (ex_index, len(UpperCAmelCase_ )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) _lowercase : Tuple = tf.data.Dataset.from_generator( UpperCAmelCase_ ,( { """example_id""": tf.intaa, """input_ids""": tf.intaa, """attention_mask""": tf.intaa, """token_type_ids""": tf.intaa, }, tf.intaa, ) ,( { """example_id""": tf.TensorShape([] ), """input_ids""": tf.TensorShape([None, None] ), """attention_mask""": tf.TensorShape([None, None] ), """token_type_ids""": tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) ,) def lowerCamelCase__ ( self ): return self.dataset def __len__( self ): return len(self.features ) def __getitem__( self ,UpperCAmelCase_ ): return self.features[i] def lowerCamelCase__ ( self ): return self.label_list class UpperCamelCase ( snake_case ): """simple docstring""" def lowerCamelCase__ ( self ,UpperCAmelCase_ ): return self._create_examples(self._read_tsv(os.path.join(UpperCAmelCase_ ,"""heuristics_train_set.txt""" ) ) ,"""train""" ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ): return self._create_examples(self._read_tsv(os.path.join(UpperCAmelCase_ ,"""heuristics_evaluation_set.txt""" ) ) ,"""dev""" ) def lowerCamelCase__ ( self ): return ["contradiction", "entailment", "neutral"] def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ): _lowercase : Optional[Any] = [] for i, line in enumerate(UpperCAmelCase_ ): if i == 0: continue _lowercase : 
Union[str, Any] = """%s-%s""" % (set_type, line[0]) _lowercase : int = line[5] _lowercase : Union[str, Any] = line[6] _lowercase : str = line[7][2:] if line[7].startswith("""ex""" ) else line[7] _lowercase : int = line[0] examples.append(InputExample(guid=UpperCAmelCase_ ,text_a=UpperCAmelCase_ ,text_b=UpperCAmelCase_ ,label=UpperCAmelCase_ ,pairID=UpperCAmelCase_ ) ) return examples def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): _lowercase : List[str] = {label: i for i, label in enumerate(__UpperCAmelCase )} _lowercase : int = [] for ex_index, example in tqdm.tqdm(enumerate(__UpperCAmelCase ) , desc="""convert examples to features""" ): if ex_index % 10000 == 0: logger.info("""Writing example %d""" % (ex_index) ) _lowercase : List[Any] = tokenizer( example.text_a , example.text_b , add_special_tokens=__UpperCAmelCase , max_length=__UpperCAmelCase , padding="""max_length""" , truncation=__UpperCAmelCase , return_overflowing_tokens=__UpperCAmelCase , ) _lowercase : List[str] = label_map[example.label] if example.label in label_map else 0 _lowercase : int = int(example.pairID ) features.append(InputFeatures(**__UpperCAmelCase , label=__UpperCAmelCase , pairID=__UpperCAmelCase ) ) for i, example in enumerate(examples[:5] ): logger.info("""*** Example ***""" ) logger.info(F"""guid: {example}""" ) logger.info(F"""features: {features[i]}""" ) return features UpperCAmelCase: List[Any] = { """hans""": 3, } UpperCAmelCase: int = { """hans""": HansProcessor, }
336
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging UpperCAmelCase: Tuple = logging.get_logger(__name__) UpperCAmelCase: List[Any] = { """CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": ( """https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json""" ), # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer } class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = "trajectory_transformer" SCREAMING_SNAKE_CASE_ : Optional[int] = ["past_key_values"] SCREAMING_SNAKE_CASE_ : Tuple = { "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self ,UpperCAmelCase_=1_00 ,UpperCAmelCase_=5 ,UpperCAmelCase_=1 ,UpperCAmelCase_=1 ,UpperCAmelCase_=2_49 ,UpperCAmelCase_=6 ,UpperCAmelCase_=17 ,UpperCAmelCase_=25 ,UpperCAmelCase_=4 ,UpperCAmelCase_=4 ,UpperCAmelCase_=1_28 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.0006 ,UpperCAmelCase_=5_12 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=1E-12 ,UpperCAmelCase_=1 ,UpperCAmelCase_=True ,UpperCAmelCase_=1 ,UpperCAmelCase_=5_02_56 ,UpperCAmelCase_=5_02_56 ,**UpperCAmelCase_ ,): _lowercase : Dict = vocab_size _lowercase : List[str] = action_weight _lowercase : int = reward_weight _lowercase : List[Any] = value_weight _lowercase : List[str] = max_position_embeddings _lowercase : Any = block_size _lowercase : Any = action_dim _lowercase : List[str] = observation_dim _lowercase : Union[str, Any] = transition_dim _lowercase : str = learning_rate _lowercase : Tuple = n_layer _lowercase : Optional[int] = n_head _lowercase : List[str] = n_embd _lowercase : List[str] = embd_pdrop _lowercase : Optional[Any] = attn_pdrop _lowercase : List[Any] = resid_pdrop _lowercase : str = initializer_range _lowercase : Optional[Any] = layer_norm_eps _lowercase : List[Any] = kaiming_initializer_range _lowercase : List[Any] = use_cache super().__init__(pad_token_id=UpperCAmelCase_ ,bos_token_id=UpperCAmelCase_ ,eos_token_id=UpperCAmelCase_ ,**UpperCAmelCase_ )
336
1
"""simple docstring""" from .integrations import ( is_optuna_available, is_ray_available, is_sigopt_available, is_wandb_available, run_hp_search_optuna, run_hp_search_ray, run_hp_search_sigopt, run_hp_search_wandb, ) from .trainer_utils import ( HPSearchBackend, default_hp_space_optuna, default_hp_space_ray, default_hp_space_sigopt, default_hp_space_wandb, ) from .utils import logging UpperCAmelCase: Optional[int] = logging.get_logger(__name__) class UpperCamelCase : """simple docstring""" SCREAMING_SNAKE_CASE_ : str SCREAMING_SNAKE_CASE_ : str = None @staticmethod def lowerCamelCase__ ( ): raise NotImplementedError def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,**UpperCAmelCase_ ): raise NotImplementedError def lowerCamelCase__ ( self ,UpperCAmelCase_ ): raise NotImplementedError def lowerCamelCase__ ( self ): if not self.is_available(): raise RuntimeError( f"""You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.""" ) @classmethod def lowerCamelCase__ ( cls ): return f"""`pip install {cls.pip_package or cls.name}`""" class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = "optuna" @staticmethod def lowerCamelCase__ ( ): return is_optuna_available() def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,**UpperCAmelCase_ ): return run_hp_search_optuna(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ): return default_hp_space_optuna(UpperCAmelCase_ ) class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = "ray" SCREAMING_SNAKE_CASE_ : List[Any] = "'ray[tune]'" @staticmethod def lowerCamelCase__ ( ): return is_ray_available() def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,**UpperCAmelCase_ ): return run_hp_search_ray(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ): return default_hp_space_ray(UpperCAmelCase_ ) class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = "sigopt" @staticmethod def lowerCamelCase__ ( ): return is_sigopt_available() def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,**UpperCAmelCase_ ): return run_hp_search_sigopt(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ): return default_hp_space_sigopt(UpperCAmelCase_ ) class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = "wandb" @staticmethod def lowerCamelCase__ ( ): return is_wandb_available() def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,**UpperCAmelCase_ ): return run_hp_search_wandb(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ): return default_hp_space_wandb(UpperCAmelCase_ ) UpperCAmelCase: Optional[int] = { HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend] } def __SCREAMING_SNAKE_CASE ( ): _lowercase : int = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()] if len(__UpperCAmelCase ) > 0: _lowercase : str = available_backends[0].name if len(__UpperCAmelCase ) > 1: logger.info( F"""{len(__UpperCAmelCase )} hyperparameter search backends available. 
Using {name} as the default.""" ) return name raise RuntimeError( """No hyperparameter search backend available.\n""" + """\n""".join( F""" - To install {backend.name} run {backend.pip_install()}""" for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
336
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING UpperCAmelCase: Any = logging.get_logger(__name__) UpperCAmelCase: List[str] = { """Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""", } class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = "instructblip_vision_model" def __init__( self ,UpperCAmelCase_=14_08 ,UpperCAmelCase_=61_44 ,UpperCAmelCase_=39 ,UpperCAmelCase_=16 ,UpperCAmelCase_=2_24 ,UpperCAmelCase_=14 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=1E-6 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=1E-10 ,UpperCAmelCase_=True ,**UpperCAmelCase_ ,): super().__init__(**UpperCAmelCase_ ) _lowercase : Optional[Any] = hidden_size _lowercase : Tuple = intermediate_size _lowercase : List[Any] = num_hidden_layers _lowercase : Tuple = num_attention_heads _lowercase : Optional[Any] = patch_size _lowercase : Optional[Any] = image_size _lowercase : Union[str, Any] = initializer_range _lowercase : Optional[Any] = attention_dropout _lowercase : List[Any] = layer_norm_eps _lowercase : Optional[int] = hidden_act _lowercase : Tuple = qkv_bias @classmethod def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ): cls._set_token_in_kwargs(UpperCAmelCase_ ) _lowercase , _lowercase : List[Any] = cls.get_config_dict(UpperCAmelCase_ ,**UpperCAmelCase_ ) # get the vision config dict if we are loading from InstructBlipConfig if config_dict.get("""model_type""" ) == "instructblip": _lowercase : int = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(UpperCAmelCase_ ,**UpperCAmelCase_ ) class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = "instructblip_qformer" def __init__( self ,UpperCAmelCase_=3_05_22 ,UpperCAmelCase_=7_68 ,UpperCAmelCase_=12 ,UpperCAmelCase_=12 ,UpperCAmelCase_=30_72 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=5_12 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=1E-12 ,UpperCAmelCase_=0 ,UpperCAmelCase_="absolute" ,UpperCAmelCase_=2 ,UpperCAmelCase_=14_08 ,**UpperCAmelCase_ ,): super().__init__(pad_token_id=UpperCAmelCase_ ,**UpperCAmelCase_ ) _lowercase : List[Any] = vocab_size _lowercase : List[Any] = hidden_size _lowercase : str = num_hidden_layers _lowercase : List[str] = num_attention_heads _lowercase : Optional[Any] = hidden_act _lowercase : int = intermediate_size _lowercase : Union[str, Any] = hidden_dropout_prob _lowercase : Optional[Any] = attention_probs_dropout_prob _lowercase : List[Any] = max_position_embeddings _lowercase : Tuple = initializer_range _lowercase : Optional[int] = layer_norm_eps _lowercase : Any = position_embedding_type _lowercase : Dict = cross_attention_frequency _lowercase : Optional[Any] = encoder_hidden_size @classmethod def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ): cls._set_token_in_kwargs(UpperCAmelCase_ ) _lowercase , _lowercase : Dict = cls.get_config_dict(UpperCAmelCase_ ,**UpperCAmelCase_ ) # get the qformer config dict if we are loading from InstructBlipConfig if config_dict.get("""model_type""" ) == "instructblip": _lowercase : str = config_dict["""qformer_config"""] if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(UpperCAmelCase_ ,**UpperCAmelCase_ ) class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = "instructblip" SCREAMING_SNAKE_CASE_ : List[str] = True def __init__( self ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=32 ,**UpperCAmelCase_ ): super().__init__(**UpperCAmelCase_ ) if vision_config is None: _lowercase : str = {} logger.info("""vision_config is None. initializing the InstructBlipVisionConfig with default values.""" ) if qformer_config is None: _lowercase : Any = {} logger.info("""qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.""" ) if text_config is None: _lowercase : Optional[int] = {} logger.info("""text_config is None. 
Initializing the text config with default values (`OPTConfig`).""" ) _lowercase : int = InstructBlipVisionConfig(**UpperCAmelCase_ ) _lowercase : Optional[int] = InstructBlipQFormerConfig(**UpperCAmelCase_ ) _lowercase : Dict = text_config["""model_type"""] if """model_type""" in text_config else """opt""" _lowercase : str = CONFIG_MAPPING[text_model_type](**UpperCAmelCase_ ) _lowercase : str = self.text_config.tie_word_embeddings _lowercase : Union[str, Any] = self.text_config.is_encoder_decoder _lowercase : List[str] = num_query_tokens _lowercase : List[str] = self.vision_config.hidden_size _lowercase : Dict = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES _lowercase : Union[str, Any] = 1.0 _lowercase : Dict = 0.02 @classmethod def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,**UpperCAmelCase_ ,): return cls( vision_config=vision_config.to_dict() ,qformer_config=qformer_config.to_dict() ,text_config=text_config.to_dict() ,**UpperCAmelCase_ ,) def lowerCamelCase__ ( self ): _lowercase : Union[str, Any] = copy.deepcopy(self.__dict__ ) _lowercase : int = self.vision_config.to_dict() _lowercase : Any = self.qformer_config.to_dict() _lowercase : Any = self.text_config.to_dict() _lowercase : Optional[int] = self.__class__.model_type return output
336
1
"""simple docstring""" def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): return " ".join(input_str.split()[::-1] ) if __name__ == "__main__": import doctest doctest.testmod()
336
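Equivalent readable form of the word-reversal one-liner above:

def reverse_words(input_str: str) -> str:
    # split() collapses whitespace; [::-1] reverses the word order.
    return " ".join(input_str.split()[::-1])

print(reverse_words("I love Python"))  # Python love I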
"""simple docstring""" import cva import numpy as np class UpperCamelCase : """simple docstring""" def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ): if k in (0.04, 0.06): _lowercase : Optional[Any] = k _lowercase : Optional[Any] = window_size else: raise ValueError("""invalid k value""" ) def __str__( self ): return str(self.k ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ): _lowercase : List[str] = cva.imread(UpperCAmelCase_ ,0 ) _lowercase , _lowercase : Dict = img.shape _lowercase : list[list[int]] = [] _lowercase : int = img.copy() _lowercase : List[str] = cva.cvtColor(UpperCAmelCase_ ,cva.COLOR_GRAY2RGB ) _lowercase , _lowercase : Optional[Any] = np.gradient(UpperCAmelCase_ ) _lowercase : Optional[int] = dx**2 _lowercase : Optional[Any] = dy**2 _lowercase : Optional[Any] = dx * dy _lowercase : List[str] = 0.04 _lowercase : Optional[Any] = self.window_size // 2 for y in range(UpperCAmelCase_ ,h - offset ): for x in range(UpperCAmelCase_ ,w - offset ): _lowercase : Optional[Any] = ixx[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() _lowercase : Dict = iyy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() _lowercase : Union[str, Any] = ixy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() _lowercase : int = (wxx * wyy) - (wxy**2) _lowercase : Union[str, Any] = wxx + wyy _lowercase : Union[str, Any] = det - k * (trace**2) # Can change the value if r > 0.5: corner_list.append([x, y, r] ) color_img.itemset((y, x, 0) ,0 ) color_img.itemset((y, x, 1) ,0 ) color_img.itemset((y, x, 2) ,2_55 ) return color_img, corner_list if __name__ == "__main__": UpperCAmelCase: Optional[int] = HarrisCorner(0.04, 3) UpperCAmelCase , UpperCAmelCase: List[Any] = edge_detect.detect("""path_to_image""") cva.imwrite("""detect.png""", color_img)
336
1
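The detector above computes the Harris response R = det(M) - k * trace(M)^2 from the structure tensor M summed over each window. A compact NumPy-only sketch of that core (illustrative; not the class API, and the thresholding/annotation steps are omitted):

import numpy as np

def harris_response(gray: np.ndarray, k: float = 0.04, window: int = 3) -> np.ndarray:
    # Structure-tensor entries from the image gradients.
    dy, dx = np.gradient(gray.astype(float))
    ixx, iyy, ixy = dx * dx, dy * dy, dx * dy
    off = window // 2
    r = np.zeros(gray.shape, dtype=float)
    for y in range(off, gray.shape[0] - off):
        for x in range(off, gray.shape[1] - off):
            wxx = ixx[y - off : y + off + 1, x - off : x + off + 1].sum()
            wyy = iyy[y - off : y + off + 1, x - off : x + off + 1].sum()
            wxy = ixy[y - off : y + off + 1, x - off : x + off + 1].sum()
            r[y, x] = (wxx * wyy - wxy**2) - k * (wxx + wyy) ** 2
    return r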
"""simple docstring""" import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int UpperCAmelCase: str = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class UpperCamelCase ( datasets.BuilderConfig ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[datasets.Features] = None def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , ): import pyspark def generate_fn(): _lowercase : List[Any] = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) ) for partition_id in partition_order: _lowercase : Optional[int] = df_with_partition_id.select("""*""" ).where(F"""part_id = {partition_id}""" ).drop("""part_id""" ) _lowercase : int = partition_df.collect() _lowercase : Dict = 0 for row in rows: yield F"""{partition_id}_{row_id}""", row.asDict() row_id += 1 return generate_fn class UpperCamelCase ( _BaseExamplesIterable ): """simple docstring""" def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_=None ,): _lowercase : Union[str, Any] = df _lowercase : List[str] = partition_order or range(self.df.rdd.getNumPartitions() ) _lowercase : Tuple = _generate_iterable_examples(self.df ,self.partition_order ) def __iter__( self ): yield from self.generate_examples_fn() def lowerCamelCase__ ( self ,UpperCAmelCase_ ): _lowercase : List[str] = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(UpperCAmelCase_ ) return SparkExamplesIterable(self.df ,partition_order=UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ): _lowercase : Union[str, Any] = self.split_shard_indices_by_worker(UpperCAmelCase_ ,UpperCAmelCase_ ) return SparkExamplesIterable(self.df ,partition_order=UpperCAmelCase_ ) @property def lowerCamelCase__ ( self ): return len(self.partition_order ) class UpperCamelCase ( datasets.DatasetBuilder ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = SparkConfig def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,): import pyspark _lowercase : List[Any] = pyspark.sql.SparkSession.builder.getOrCreate() _lowercase : List[Any] = df _lowercase : int = working_dir super().__init__( cache_dir=UpperCAmelCase_ ,config_name=str(self.df.semanticHash() ) ,**UpperCAmelCase_ ,) def lowerCamelCase__ ( self ): # Returns the path of the created file. def create_cache_and_write_probe(UpperCAmelCase_ ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(self._cache_dir ,exist_ok=UpperCAmelCase_ ) _lowercase : Union[str, Any] = os.path.join(self._cache_dir ,"""fs_test""" + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(UpperCAmelCase_ ,"""a""" ) return [probe_file] if self._spark.conf.get("""spark.master""" ,"""""" ).startswith("""local""" ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. 
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. if self._cache_dir: _lowercase : List[str] = ( self._spark.sparkContext.parallelize(range(1 ) ,1 ).mapPartitions(UpperCAmelCase_ ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( """When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" ) def lowerCamelCase__ ( self ): return datasets.DatasetInfo(features=self.config.features ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ): return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def lowerCamelCase__ ( self ,UpperCAmelCase_ ): import pyspark def get_arrow_batch_size(UpperCAmelCase_ ): for batch in it: yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} ) _lowercase : List[str] = self.df.count() _lowercase : List[str] = df_num_rows if df_num_rows <= 1_00 else 1_00 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. _lowercase : Union[str, Any] = ( self.df.limit(UpperCAmelCase_ ) .repartition(1 ) .mapInArrow(UpperCAmelCase_ ,"""batch_bytes: long""" ) .agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) ) .collect()[0] .sample_bytes / sample_num_rows ) _lowercase : List[Any] = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. _lowercase : int = min(UpperCAmelCase_ ,int(approx_total_size / max_shard_size ) ) _lowercase : List[Any] = self.df.repartition(UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,): import pyspark _lowercase : Union[str, Any] = ParquetWriter if file_format == """parquet""" else ArrowWriter _lowercase : List[Any] = os.path.join(self._working_dir ,os.path.basename(UpperCAmelCase_ ) ) if self._working_dir else fpath _lowercase : Any = file_format == """parquet""" # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. _lowercase : Union[str, Any] = self.config.features _lowercase : Optional[int] = self._writer_batch_size _lowercase : Optional[Any] = self._fs.storage_options def write_arrow(UpperCAmelCase_ ): # Within the same SparkContext, no two task attempts will share the same attempt ID. _lowercase : Any = pyspark.TaskContext().taskAttemptId() _lowercase : List[str] = next(UpperCAmelCase_ ,UpperCAmelCase_ ) if first_batch is None: # Some partitions might not receive any data. 
return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,) _lowercase : List[Any] = 0 _lowercase : int = writer_class( features=UpperCAmelCase_ ,path=working_fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,writer_batch_size=UpperCAmelCase_ ,storage_options=UpperCAmelCase_ ,embed_local_files=UpperCAmelCase_ ,) _lowercase : Optional[int] = pa.Table.from_batches([first_batch] ) writer.write_table(UpperCAmelCase_ ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: _lowercase , _lowercase : Optional[Any] = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,) shard_id += 1 _lowercase : Union[str, Any] = writer_class( features=writer._features ,path=working_fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,writer_batch_size=UpperCAmelCase_ ,storage_options=UpperCAmelCase_ ,embed_local_files=UpperCAmelCase_ ,) _lowercase : Dict = pa.Table.from_batches([batch] ) writer.write_table(UpperCAmelCase_ ) if writer._num_bytes > 0: _lowercase , _lowercase : Dict = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,) if working_fpath != fpath: for file in os.listdir(os.path.dirname(UpperCAmelCase_ ) ): _lowercase : Dict = os.path.join(os.path.dirname(UpperCAmelCase_ ) ,os.path.basename(UpperCAmelCase_ ) ) shutil.move(UpperCAmelCase_ ,UpperCAmelCase_ ) _lowercase : List[str] = ( self.df.mapInArrow(UpperCAmelCase_ ,"""task_id: long, num_examples: long, num_bytes: long""" ) .groupBy("""task_id""" ) .agg( pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) ,pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) ,pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) ,pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) ,) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = "arrow" ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,): self._validate_cache_dir() _lowercase : Tuple = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(UpperCAmelCase_ ) _lowercase : Optional[int] = not is_remote_filesystem(self._fs ) _lowercase : Dict = os.path.join if is_local else posixpath.join _lowercase : int = """-TTTTT-SSSSS-of-NNNNN""" _lowercase : Optional[Any] = f"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}""" _lowercase : Dict = path_join(self._output_dir ,UpperCAmelCase_ ) _lowercase : List[Any] = 0 _lowercase : Optional[Any] = 0 _lowercase : int = 0 _lowercase : Any = [] _lowercase : Any = [] for task_id, content in self._prepare_split_single(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ): ( ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ) : Tuple = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(UpperCAmelCase_ ) _lowercase : Optional[int] = total_num_examples _lowercase : List[Any] = total_num_bytes # should rename 
everything at the end logger.debug(f"""Renaming {total_shards} shards.""" ) if total_shards > 1: _lowercase : List[Any] = all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the SparkContext. _lowercase : Union[str, Any] = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,): rename( UpperCAmelCase_ ,fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,fpath.replace("""TTTTT-SSSSS""" ,f"""{global_shard_id:05d}""" ).replace("""NNNNN""" ,f"""{total_shards:05d}""" ) ,) _lowercase : Optional[Any] = [] _lowercase : List[str] = 0 for i in range(len(UpperCAmelCase_ ) ): _lowercase , _lowercase : List[str] = task_id_and_num_shards[i] for shard_id in range(UpperCAmelCase_ ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(UpperCAmelCase_ ,len(UpperCAmelCase_ ) ).map(lambda UpperCAmelCase_ : _rename_shard(*UpperCAmelCase_ ) ).collect() else: # don't use any pattern _lowercase : Tuple = 0 _lowercase : Optional[Any] = task_id_and_num_shards[0][0] self._rename( fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,fpath.replace(UpperCAmelCase_ ,"""""" ) ,) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,): return SparkExamplesIterable(self.df )
336
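The builder above is what backs the library's Spark integration; a hedged end-to-end sketch (assumes pyspark is installed and a recent datasets release that exposes Dataset.from_spark):

from pyspark.sql import SparkSession
from datasets import Dataset

spark = SparkSession.builder.master("local[2]").getOrCreate()
df = spark.createDataFrame([("hello",), ("world",)], ["text"])
ds = Dataset.from_spark(df)  # materializes through the Spark builder
print(ds[0])  # {'text': 'hello'}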
"""simple docstring""" import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCamelCase ( snake_case , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = LEDTokenizer SCREAMING_SNAKE_CASE_ : List[str] = LEDTokenizerFast SCREAMING_SNAKE_CASE_ : List[str] = True def lowerCamelCase__ ( self ): super().setUp() _lowercase : Union[str, Any] = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] _lowercase : List[Any] = dict(zip(UpperCAmelCase_ ,range(len(UpperCAmelCase_ ) ) ) ) _lowercase : Optional[int] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] _lowercase : Dict = {"""unk_token""": """<unk>"""} _lowercase : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] ) _lowercase : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write(json.dumps(UpperCAmelCase_ ) + """\n""" ) with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write("""\n""".join(UpperCAmelCase_ ) ) def lowerCamelCase__ ( self ,**UpperCAmelCase_ ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,**UpperCAmelCase_ ): kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ): return "lower newer", "lower newer" @cached_property def lowerCamelCase__ ( self ): return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" ) @cached_property def lowerCamelCase__ ( self ): return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" ) @require_torch def lowerCamelCase__ ( self ): _lowercase : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] _lowercase : Any = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowercase : Tuple = tokenizer(UpperCAmelCase_ ,max_length=len(UpperCAmelCase_ ) ,padding=UpperCAmelCase_ ,return_tensors="""pt""" ) self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ ) self.assertEqual((2, 9) ,batch.input_ids.shape ) self.assertEqual((2, 9) ,batch.attention_mask.shape ) _lowercase : Optional[Any] = batch.input_ids.tolist()[0] self.assertListEqual(UpperCAmelCase_ ,UpperCAmelCase_ ) @require_torch def lowerCamelCase__ ( self ): _lowercase : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowercase : Dict = tokenizer(UpperCAmelCase_ ,padding=UpperCAmelCase_ ,return_tensors="""pt""" ) self.assertIn("""input_ids""" ,UpperCAmelCase_ ) self.assertIn("""attention_mask""" ,UpperCAmelCase_ ) self.assertNotIn("""labels""" ,UpperCAmelCase_ ) self.assertNotIn("""decoder_attention_mask""" ,UpperCAmelCase_ ) 
@require_torch def lowerCamelCase__ ( self ): _lowercase : Dict = [ """Summary of the text.""", """Another summary.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowercase : Tuple = tokenizer(text_target=UpperCAmelCase_ ,max_length=32 ,padding="""max_length""" ,return_tensors="""pt""" ) self.assertEqual(32 ,targets["""input_ids"""].shape[1] ) @require_torch def lowerCamelCase__ ( self ): for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowercase : List[Any] = tokenizer( ["""I am a small frog""" * 10_24, """I am a small frog"""] ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,return_tensors="""pt""" ) self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ ) self.assertEqual(batch.input_ids.shape ,(2, 51_22) ) @require_torch def lowerCamelCase__ ( self ): _lowercase : List[Any] = ["""A long paragraph for summarization."""] _lowercase : Dict = [ """Summary of the text.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowercase : Dict = tokenizer(UpperCAmelCase_ ,return_tensors="""pt""" ) _lowercase : List[str] = tokenizer(text_target=UpperCAmelCase_ ,return_tensors="""pt""" ) _lowercase : Union[str, Any] = inputs["""input_ids"""] _lowercase : List[str] = targets["""input_ids"""] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def lowerCamelCase__ ( self ): for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowercase : str = ["""Summary of the text.""", """Another summary."""] _lowercase : Optional[int] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] _lowercase : Any = tokenizer(UpperCAmelCase_ ,padding=UpperCAmelCase_ ) _lowercase : str = [[0] * len(UpperCAmelCase_ ) for x in encoded_output["""input_ids"""]] _lowercase : Optional[int] = tokenizer.pad(UpperCAmelCase_ ) self.assertSequenceEqual(outputs["""global_attention_mask"""] ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ): pass def lowerCamelCase__ ( self ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): _lowercase : int = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ ) _lowercase : Optional[int] = self.tokenizer_class.from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ ) _lowercase : Dict = """A, <mask> AllenNLP sentence.""" _lowercase : List[Any] = tokenizer_r.encode_plus(UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ ) _lowercase : Any = tokenizer_p.encode_plus(UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ ) self.assertEqual(sum(tokens_r["""token_type_ids"""] ) ,sum(tokens_p["""token_type_ids"""] ) ) self.assertEqual( sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) ,sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) ,) _lowercase : str = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) _lowercase : str = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) self.assertSequenceEqual(tokens_p["""input_ids"""] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) 
self.assertSequenceEqual( UpperCAmelCase_ ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( UpperCAmelCase_ ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
336
1
"""simple docstring""" # this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: # python ./utils/get_modified_files.py utils src tests examples # # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered # since the output of this script is fed into Makefile commands it doesn't print a newline after the results import re import subprocess import sys UpperCAmelCase: List[Any] = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""") UpperCAmelCase: List[str] = subprocess.check_output(F'git diff --name-only {fork_point_sha}'.split()).decode("""utf-8""").split() UpperCAmelCase: Union[str, Any] = """|""".join(sys.argv[1:]) UpperCAmelCase: Tuple = re.compile(rF'^({joined_dirs}).*?\.py$') UpperCAmelCase: Tuple = [x for x in modified_files if regex.match(x)] print(""" """.join(relevant_modified_files), end="""""")
336
"""simple docstring""" import argparse from collections import defaultdict def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): _lowercase : str = F"""{file}_{class_name}_{test_name}""" done_test[_id] += 1 with open(__UpperCAmelCase , """r""" ) as f: _lowercase : Any = f.readlines() _lowercase : Optional[int] = F"""class {class_name}(""" _lowercase : List[str] = F"""{4 * " "}def {test_name}(""" _lowercase : List[Any] = F"""{8 * " "}{correct_line.split()[0]}""" _lowercase : int = F"""{16 * " "}{correct_line.split()[0]}""" _lowercase : str = False _lowercase : Optional[Any] = False _lowercase : Union[str, Any] = False _lowercase : Any = False _lowercase : int = 0 _lowercase : Tuple = 0 _lowercase : Union[str, Any] = [] for line in lines: if line.startswith(__UpperCAmelCase ): _lowercase : List[str] = True elif in_class and line.startswith(__UpperCAmelCase ): _lowercase : str = True elif in_class and in_func and (line.startswith(__UpperCAmelCase ) or line.startswith(__UpperCAmelCase )): _lowercase : Union[str, Any] = len(line.split(correct_line.split()[0] )[0] ) count += 1 if count == done_test[_id]: _lowercase : Optional[int] = True if in_class and in_func and in_line: if ")" not in line: continue else: _lowercase : Optional[Any] = True if in_class and in_func and in_line and insert_line: new_lines.append(F"""{spaces * " "}{correct_line}""" ) _lowercase : Union[str, Any] = False else: new_lines.append(__UpperCAmelCase ) with open(__UpperCAmelCase , """w""" ) as f: for line in new_lines: f.write(__UpperCAmelCase ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase=None ): if fail is not None: with open(__UpperCAmelCase , """r""" ) as f: _lowercase : Dict = {l.strip() for l in f.readlines()} else: _lowercase : int = None with open(__UpperCAmelCase , """r""" ) as f: _lowercase : int = f.readlines() _lowercase : int = defaultdict(__UpperCAmelCase ) for line in correct_lines: _lowercase , _lowercase , _lowercase , _lowercase : int = line.split(""";""" ) if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures: overwrite_file(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) if __name__ == "__main__": UpperCAmelCase: List[Any] = argparse.ArgumentParser() parser.add_argument("""--correct_filename""", help="""filename of tests with expected result""") parser.add_argument("""--fail_filename""", help="""filename of test failures""", type=str, default=None) UpperCAmelCase: Any = parser.parse_args() main(args.correct_filename, args.fail_filename)
336
1
"""simple docstring""" import argparse from collections import defaultdict import yaml UpperCAmelCase: str = """docs/source/en/_toctree.yml""" def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase : List[Any] = defaultdict(__UpperCAmelCase ) for doc in model_doc: counts[doc["local"]] += 1 _lowercase : Union[str, Any] = [key for key, value in counts.items() if value > 1] _lowercase : str = [] for duplicate_key in duplicates: _lowercase : Optional[Any] = list({doc["""title"""] for doc in model_doc if doc["""local"""] == duplicate_key} ) if len(__UpperCAmelCase ) > 1: raise ValueError( F"""{duplicate_key} is present several times in the documentation table of content at """ """`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """ """others.""" ) # Only add this once new_doc.append({"""local""": duplicate_key, """title""": titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in model_doc if counts[doc["""local"""]] == 1] ) # Sort return sorted(__UpperCAmelCase , key=lambda __UpperCAmelCase : s["title"].lower() ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase=False ): with open(__UpperCAmelCase , encoding="""utf-8""" ) as f: _lowercase : Any = yaml.safe_load(f.read() ) # Get to the API doc _lowercase : Optional[int] = 0 while content[api_idx]["title"] != "API": api_idx += 1 _lowercase : Optional[int] = content[api_idx]["""sections"""] # Then to the model doc _lowercase : Dict = 0 while api_doc[model_idx]["title"] != "Models": model_idx += 1 _lowercase : int = api_doc[model_idx]["""sections"""] _lowercase : Dict = [(idx, section) for idx, section in enumerate(__UpperCAmelCase ) if """sections""" in section] _lowercase : str = False for idx, modality_doc in modalities_docs: _lowercase : Dict = modality_doc["""sections"""] _lowercase : List[str] = clean_model_doc_toc(__UpperCAmelCase ) if old_modality_doc != new_modality_doc: _lowercase : Optional[int] = True if overwrite: _lowercase : Dict = new_modality_doc if diff: if overwrite: _lowercase : List[Any] = model_doc _lowercase : Tuple = api_doc with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as f: f.write(yaml.dump(__UpperCAmelCase , allow_unicode=__UpperCAmelCase ) ) else: raise ValueError( """The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" ) if __name__ == "__main__": UpperCAmelCase: Any = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") UpperCAmelCase: List[Any] = parser.parse_args() check_model_doc(args.fix_and_overwrite)
336
"""simple docstring""" UpperCAmelCase: List[str] = """0.21.0""" from .accelerator import Accelerator from .big_modeling import ( cpu_offload, cpu_offload_with_hook, disk_offload, dispatch_model, init_empty_weights, init_on_device, load_checkpoint_and_dispatch, ) from .data_loader import skip_first_batches from .launchers import debug_launcher, notebook_launcher from .state import PartialState from .utils import ( DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, FullyShardedDataParallelPlugin, GradScalerKwargs, InitProcessGroupKwargs, find_executable_batch_size, infer_auto_device_map, is_rich_available, load_checkpoint_in_model, synchronize_rng_states, ) if is_rich_available(): from .utils import rich
336
1
"""simple docstring""" # limitations under the License. # NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401 from .utils import deprecate deprecate( """pipelines_utils""", """0.22.0""", """Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""", standard_warn=False, stacklevel=3, )
336
"""simple docstring""" UpperCAmelCase: str = """ # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git """ UpperCAmelCase: Any = [{"""type""": """code""", """content""": INSTALL_CONTENT}] UpperCAmelCase: int = { """{processor_class}""": """FakeProcessorClass""", """{model_class}""": """FakeModelClass""", """{object_class}""": """FakeObjectClass""", }
336
1
"""simple docstring""" def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ): if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise ValueError("""iterations must be defined as integers""" ) if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or not number >= 1: raise ValueError( """starting number must be and integer and be more than 0""" ) if not iterations >= 1: raise ValueError("""Iterations must be done more than 0 times to play FizzBuzz""" ) _lowercase : List[str] = """""" while number <= iterations: if number % 3 == 0: out += "Fizz" if number % 5 == 0: out += "Buzz" if 0 not in (number % 3, number % 5): out += str(__UpperCAmelCase ) # print(out) number += 1 out += " " return out if __name__ == "__main__": import doctest doctest.testmod()
336
"""simple docstring""" import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL UpperCAmelCase: List[Any] = logging.get_logger(__name__) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): def constraint_to_multiple_of(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=0 , __UpperCAmelCase=None ): _lowercase : Union[str, Any] = round(val / multiple ) * multiple if max_val is not None and x > max_val: _lowercase : str = math.floor(val / multiple ) * multiple if x < min_val: _lowercase : Dict = math.ceil(val / multiple ) * multiple return x _lowercase : List[str] = (output_size, output_size) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else output_size _lowercase , _lowercase : List[Any] = get_image_size(__UpperCAmelCase ) _lowercase , _lowercase : Union[str, Any] = output_size # determine new height and width _lowercase : str = output_height / input_height _lowercase : List[Any] = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width _lowercase : str = scale_width else: # fit height _lowercase : int = scale_height _lowercase : List[Any] = constraint_to_multiple_of(scale_height * input_height , multiple=__UpperCAmelCase ) _lowercase : Optional[Any] = constraint_to_multiple_of(scale_width * input_width , multiple=__UpperCAmelCase ) return (new_height, new_width) class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = ["pixel_values"] def __init__( self ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,UpperCAmelCase_ = PILImageResampling.BILINEAR ,UpperCAmelCase_ = False ,UpperCAmelCase_ = 1 ,UpperCAmelCase_ = True ,UpperCAmelCase_ = 1 / 2_55 ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,): super().__init__(**UpperCAmelCase_ ) _lowercase : List[Any] = size if size is not None else {"""height""": 3_84, """width""": 3_84} _lowercase : str = get_size_dict(UpperCAmelCase_ ) _lowercase : Tuple = do_resize _lowercase : Any = size _lowercase : List[Any] = keep_aspect_ratio _lowercase : Any = ensure_multiple_of _lowercase : str = resample _lowercase : Optional[Any] = do_rescale _lowercase : List[Any] = rescale_factor _lowercase : Union[str, Any] = do_normalize _lowercase : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _lowercase : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = False ,UpperCAmelCase_ = 1 ,UpperCAmelCase_ = PILImageResampling.BICUBIC ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,): _lowercase : Optional[Any] = get_size_dict(UpperCAmelCase_ ) if "height" not in size or "width" not in size: raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. 
Got {size.keys()}""" ) _lowercase : Dict = get_resize_output_image_size( UpperCAmelCase_ ,output_size=(size["""height"""], size["""width"""]) ,keep_aspect_ratio=UpperCAmelCase_ ,multiple=UpperCAmelCase_ ,) return resize(UpperCAmelCase_ ,size=UpperCAmelCase_ ,resample=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,): return rescale(UpperCAmelCase_ ,scale=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,): return normalize(UpperCAmelCase_ ,mean=UpperCAmelCase_ ,std=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = ChannelDimension.FIRST ,**UpperCAmelCase_ ,): _lowercase : Any = do_resize if do_resize is not None else self.do_resize _lowercase : List[str] = size if size is not None else self.size _lowercase : int = get_size_dict(UpperCAmelCase_ ) _lowercase : Any = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio _lowercase : List[str] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of _lowercase : List[str] = resample if resample is not None else self.resample _lowercase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale _lowercase : Any = rescale_factor if rescale_factor is not None else self.rescale_factor _lowercase : str = do_normalize if do_normalize is not None else self.do_normalize _lowercase : Optional[int] = image_mean if image_mean is not None else self.image_mean _lowercase : int = image_std if image_std is not None else self.image_std _lowercase : Union[str, Any] = make_list_of_images(UpperCAmelCase_ ) if not valid_images(UpperCAmelCase_ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. 
_lowercase : int = [to_numpy_array(UpperCAmelCase_ ) for image in images] if do_resize: _lowercase : Union[str, Any] = [self.resize(image=UpperCAmelCase_ ,size=UpperCAmelCase_ ,resample=UpperCAmelCase_ ) for image in images] if do_rescale: _lowercase : int = [self.rescale(image=UpperCAmelCase_ ,scale=UpperCAmelCase_ ) for image in images] if do_normalize: _lowercase : str = [self.normalize(image=UpperCAmelCase_ ,mean=UpperCAmelCase_ ,std=UpperCAmelCase_ ) for image in images] _lowercase : Tuple = [to_channel_dimension_format(UpperCAmelCase_ ,UpperCAmelCase_ ) for image in images] _lowercase : int = {"""pixel_values""": images} return BatchFeature(data=UpperCAmelCase_ ,tensor_type=UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ): _lowercase : Union[str, Any] = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(UpperCAmelCase_ ) != len(UpperCAmelCase_ ): raise ValueError( """Make sure that you pass in as many target sizes as the batch dimension of the logits""" ) if is_torch_tensor(UpperCAmelCase_ ): _lowercase : Tuple = target_sizes.numpy() _lowercase : Optional[Any] = [] for idx in range(len(UpperCAmelCase_ ) ): _lowercase : Dict = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode="""bilinear""" ,align_corners=UpperCAmelCase_ ) _lowercase : Optional[int] = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(UpperCAmelCase_ ) else: _lowercase : Union[str, Any] = logits.argmax(dim=1 ) _lowercase : Optional[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
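A minimal usage sketch; "Intel/dpt-large" is a real DPT checkpoint but any repo with a DPT image-processor config should work, and the image path is hypothetical:

from PIL import Image
from transformers import DPTImageProcessor

processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")
image = Image.open("example.jpg")  # hypothetical local file
inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 384, 384]) with the default 384x384 size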
336
1
"""simple docstring""" import numpy # List of input, output pairs UpperCAmelCase: str = ( ((5, 2, 3), 15), ((6, 5, 9), 25), ((11, 12, 13), 41), ((1, 1, 1), 8), ((11, 12, 13), 41), ) UpperCAmelCase: Any = (((515, 22, 13), 555), ((61, 35, 49), 150)) UpperCAmelCase: str = [2, 4, 1, 5] UpperCAmelCase: List[str] = len(train_data) UpperCAmelCase: str = 0.009 def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase="train" ): return calculate_hypothesis_value(__UpperCAmelCase , __UpperCAmelCase ) - output( __UpperCAmelCase , __UpperCAmelCase ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase : int = 0 for i in range(len(__UpperCAmelCase ) - 1 ): hyp_val += data_input_tuple[i] * parameter_vector[i + 1] hyp_val += parameter_vector[0] return hyp_val def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ): if data_set == "train": return train_data[example_no][1] elif data_set == "test": return test_data[example_no][1] return None def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ): if data_set == "train": return _hypothesis_value(train_data[example_no][0] ) elif data_set == "test": return _hypothesis_value(test_data[example_no][0] ) return None def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase=m ): _lowercase : Tuple = 0 for i in range(__UpperCAmelCase ): if index == -1: summation_value += _error(__UpperCAmelCase ) else: summation_value += _error(__UpperCAmelCase ) * train_data[i][0][index] return summation_value def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase : Any = summation_of_cost_derivative(__UpperCAmelCase , __UpperCAmelCase ) / m return cost_derivative_value def __SCREAMING_SNAKE_CASE ( ): global parameter_vector # Tune these values to set a tolerance value for predicted output _lowercase : Optional[Any] = 0.0_0_0_0_0_2 _lowercase : List[Any] = 0 _lowercase : Tuple = 0 while True: j += 1 _lowercase : Any = [0, 0, 0, 0] for i in range(0 , len(__UpperCAmelCase ) ): _lowercase : Tuple = get_cost_derivative(i - 1 ) _lowercase : Any = ( parameter_vector[i] - LEARNING_RATE * cost_derivative ) if numpy.allclose( __UpperCAmelCase , __UpperCAmelCase , atol=__UpperCAmelCase , rtol=__UpperCAmelCase , ): break _lowercase : str = temp_parameter_vector print(("""Number of iterations:""", j) ) def __SCREAMING_SNAKE_CASE ( ): for i in range(len(__UpperCAmelCase ) ): print(("""Actual output value:""", output(__UpperCAmelCase , """test""" )) ) print(("""Hypothesis output:""", calculate_hypothesis_value(__UpperCAmelCase , """test""" )) ) if __name__ == "__main__": run_gradient_descent() print("""\nTesting gradient descent for a linear hypothesis function.\n""") test_gradient_descent()
336
"""simple docstring""" import numpy as np import skfuzzy as fuzz if __name__ == "__main__": # Create universe of discourse in Python using linspace () UpperCAmelCase: Optional[Any] = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False) # Create two fuzzy sets by defining any membership function # (trapmf(), gbellmf(), gaussmf(), etc). UpperCAmelCase: Tuple = [0, 25, 50] UpperCAmelCase: List[Any] = [25, 50, 75] UpperCAmelCase: Optional[int] = fuzz.membership.trimf(X, abca) UpperCAmelCase: Any = fuzz.membership.trimf(X, abca) # Compute the different operations using inbuilt functions. UpperCAmelCase: List[Any] = np.ones(75) UpperCAmelCase: Any = np.zeros((75,)) # 1. Union = max(µA(x), µB(x)) UpperCAmelCase: str = fuzz.fuzzy_or(X, young, X, middle_aged)[1] # 2. Intersection = min(µA(x), µB(x)) UpperCAmelCase: Optional[int] = fuzz.fuzzy_and(X, young, X, middle_aged)[1] # 3. Complement (A) = (1- min(µA(x)) UpperCAmelCase: List[Any] = fuzz.fuzzy_not(young) # 4. Difference (A/B) = min(µA(x),(1- µB(x))) UpperCAmelCase: Optional[int] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1] # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))] UpperCAmelCase: int = young + middle_aged - (young * middle_aged) # 6. Algebraic Product = (µA(x) * µB(x)) UpperCAmelCase: int = young * middle_aged # 7. Bounded Sum = min[1,(µA(x), µB(x))] UpperCAmelCase: List[Any] = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1] # 8. Bounded difference = min[0,(µA(x), µB(x))] UpperCAmelCase: int = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1] # max-min composition # max-product composition # Plot each set A, set B and each operation result using plot() and subplot(). from matplotlib import pyplot as plt plt.figure() plt.subplot(4, 3, 1) plt.plot(X, young) plt.title("""Young""") plt.grid(True) plt.subplot(4, 3, 2) plt.plot(X, middle_aged) plt.title("""Middle aged""") plt.grid(True) plt.subplot(4, 3, 3) plt.plot(X, union) plt.title("""union""") plt.grid(True) plt.subplot(4, 3, 4) plt.plot(X, intersection) plt.title("""intersection""") plt.grid(True) plt.subplot(4, 3, 5) plt.plot(X, complement_a) plt.title("""complement_a""") plt.grid(True) plt.subplot(4, 3, 6) plt.plot(X, difference) plt.title("""difference a/b""") plt.grid(True) plt.subplot(4, 3, 7) plt.plot(X, alg_sum) plt.title("""alg_sum""") plt.grid(True) plt.subplot(4, 3, 8) plt.plot(X, alg_product) plt.title("""alg_product""") plt.grid(True) plt.subplot(4, 3, 9) plt.plot(X, bdd_sum) plt.title("""bdd_sum""") plt.grid(True) plt.subplot(4, 3, 10) plt.plot(X, bdd_difference) plt.title("""bdd_difference""") plt.grid(True) plt.subplots_adjust(hspace=0.5) plt.show()
336
1
"""simple docstring""" # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import subprocess from packaging.version import Version, parse from accelerate.commands.config.config_args import default_config_file, load_config_from_file UpperCAmelCase: List[Any] = """Run commands across TPU VMs for initial setup before running `accelerate launch`.""" def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase=None ): if subparsers is not None: _lowercase : int = subparsers.add_parser("""tpu-config""" , description=_description ) else: _lowercase : Optional[int] = argparse.ArgumentParser("""Accelerate tpu-config command""" , description=_description ) # Core arguments _lowercase : List[str] = parser.add_argument_group( """Config Arguments""" , """Arguments that can be configured through `accelerate config`.""" ) config_args.add_argument( """--config_file""" , type=__UpperCAmelCase , default=__UpperCAmelCase , help="""Path to the config file to use for accelerate.""" , ) config_args.add_argument( """--tpu_name""" , default=__UpperCAmelCase , help="""The name of the TPU to use. If not specified, will use the TPU specified in the config file.""" , ) config_args.add_argument( """--tpu_zone""" , default=__UpperCAmelCase , help="""The zone of the TPU to use. If not specified, will use the zone specified in the config file.""" , ) _lowercase : int = parser.add_argument_group("""TPU Arguments""" , """Arguments for options ran inside the TPU.""" ) pod_args.add_argument( """--use_alpha""" , action="""store_true""" , help="""Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.""" , ) pod_args.add_argument( """--command_file""" , default=__UpperCAmelCase , help="""The path to the file containing the commands to run on the pod on startup.""" , ) pod_args.add_argument( """--command""" , action="""append""" , nargs="""+""" , help="""A command to run on the pod. Can be passed multiple times.""" , ) pod_args.add_argument( """--install_accelerate""" , action="""store_true""" , help="""Whether to install accelerate on the pod. Defaults to False.""" , ) pod_args.add_argument( """--accelerate_version""" , default="""latest""" , help="""The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.""" , ) pod_args.add_argument( """--debug""" , action="""store_true""" , help="""If set, will print the command that would be run instead of running it.""" ) if subparsers is not None: parser.set_defaults(func=__UpperCAmelCase ) return parser def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase : Union[str, Any] = None # Get the default from the config file if it exists. 
if args.config_file is not None or os.path.isfile(__UpperCAmelCase ): _lowercase : Tuple = load_config_from_file(args.config_file ) if not args.command_file and defaults.command_file is not None and not args.command: _lowercase : List[str] = defaults.command_file if not args.command and defaults.commands is not None: _lowercase : int = defaults.commands if not args.tpu_name: _lowercase : Optional[int] = defaults.tpu_name if not args.tpu_zone: _lowercase : Dict = defaults.tpu_zone if args.accelerate_version == "dev": _lowercase : int = """git+https://github.com/huggingface/accelerate.git""" elif args.accelerate_version == "latest": _lowercase : List[Any] = """accelerate -U""" elif isinstance(parse(args.accelerate_version ) , __UpperCAmelCase ): _lowercase : List[str] = F"""accelerate=={args.accelerate_version}""" if not args.command_file and not args.command: raise ValueError("""You must specify either a command file or a command to run on the pod.""" ) if args.command_file: with open(args.command_file , """r""" ) as f: _lowercase : Dict = [f.read().splitlines()] # To turn list of lists into list of strings if isinstance(args.command[0] , __UpperCAmelCase ): _lowercase : Optional[Any] = [line for cmd in args.command for line in cmd] # Default to the shared folder and install accelerate _lowercase : int = ["""cd /usr/share"""] if args.install_accelerate: new_cmd += [F"""pip install {args.accelerate_version}"""] new_cmd += args.command _lowercase : Optional[int] = """; """.join(__UpperCAmelCase ) # Then send it to gcloud # Eventually try to use google-api-core to do this instead of subprocess _lowercase : Any = ["""gcloud"""] if args.use_alpha: cmd += ["alpha"] cmd += [ "compute", "tpus", "tpu-vm", "ssh", args.tpu_name, "--zone", args.tpu_zone, "--command", args.command, "--worker", "all", ] if args.debug: print(F"""Running {" ".join(__UpperCAmelCase )}""" ) return subprocess.run(__UpperCAmelCase ) print("""Successfully setup pod.""" ) def __SCREAMING_SNAKE_CASE ( ): _lowercase : Union[str, Any] = tpu_command_parser() _lowercase : Optional[int] = parser.parse_args() tpu_command_launcher(__UpperCAmelCase )
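An illustrative invocation (the TPU name and zone are placeholders):

accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a --command "pip install -r requirements.txt" --debug

With --debug set, the assembled gcloud command is printed instead of executed.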
336
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class UpperCamelCase ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self ): _lowercase : str = tempfile.mkdtemp() # fmt: off _lowercase : List[Any] = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""] # fmt: on _lowercase : Optional[int] = dict(zip(UpperCAmelCase_ ,range(len(UpperCAmelCase_ ) ) ) ) _lowercase : Optional[int] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""] _lowercase : Optional[int] = {"""unk_token""": """<unk>"""} _lowercase : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] ) _lowercase : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write(json.dumps(UpperCAmelCase_ ) + """\n""" ) with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write("""\n""".join(UpperCAmelCase_ ) ) _lowercase : Dict = { """do_resize""": True, """size""": 20, """do_center_crop""": True, """crop_size""": 18, """do_normalize""": True, """image_mean""": [0.48145466, 0.4578275, 0.40821073], """image_std""": [0.26862954, 0.26130258, 0.27577711], } _lowercase : List[Any] = os.path.join(self.tmpdirname ,UpperCAmelCase_ ) with open(self.image_processor_file ,"""w""" ,encoding="""utf-8""" ) as fp: json.dump(UpperCAmelCase_ ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ,**UpperCAmelCase_ ): return CLIPTokenizer.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,**UpperCAmelCase_ ): return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,**UpperCAmelCase_ ): return CLIPImageProcessor.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ): shutil.rmtree(self.tmpdirname ) def lowerCamelCase__ ( self ): _lowercase : Optional[Any] = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )] _lowercase : Tuple = [Image.fromarray(np.moveaxis(UpperCAmelCase_ ,0 ,-1 ) ) for x in image_inputs] return image_inputs def lowerCamelCase__ ( self ): _lowercase : Union[str, Any] = self.get_tokenizer() _lowercase : List[Any] = self.get_rust_tokenizer() _lowercase : List[Any] = self.get_image_processor() _lowercase : List[str] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) processor_slow.save_pretrained(self.tmpdirname ) _lowercase : Optional[int] = CLIPProcessor.from_pretrained(self.tmpdirname ,use_fast=UpperCAmelCase_ ) _lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) processor_fast.save_pretrained(self.tmpdirname ) _lowercase : List[str] = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() ) 
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer ,UpperCAmelCase_ ) self.assertIsInstance(processor_fast.tokenizer ,UpperCAmelCase_ ) self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor ,UpperCAmelCase_ ) self.assertIsInstance(processor_fast.image_processor ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ): _lowercase : str = CLIPProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _lowercase : Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" ) _lowercase : Optional[int] = self.get_image_processor(do_normalize=UpperCAmelCase_ ,padding_value=1.0 ) _lowercase : int = CLIPProcessor.from_pretrained( self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=UpperCAmelCase_ ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,UpperCAmelCase_ ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ): _lowercase : Optional[Any] = self.get_image_processor() _lowercase : Optional[int] = self.get_tokenizer() _lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) _lowercase : int = self.prepare_image_inputs() _lowercase : str = image_processor(UpperCAmelCase_ ,return_tensors="""np""" ) _lowercase : int = processor(images=UpperCAmelCase_ ,return_tensors="""np""" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() ,input_processor[key].sum() ,delta=1E-2 ) def lowerCamelCase__ ( self ): _lowercase : Optional[Any] = self.get_image_processor() _lowercase : Optional[Any] = self.get_tokenizer() _lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) _lowercase : List[Any] = """lower newer""" _lowercase : Any = processor(text=UpperCAmelCase_ ) _lowercase : Union[str, Any] = tokenizer(UpperCAmelCase_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def lowerCamelCase__ ( self ): _lowercase : Union[str, Any] = self.get_image_processor() _lowercase : List[Any] = self.get_tokenizer() _lowercase : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) _lowercase : str = """lower newer""" _lowercase : List[Any] = self.prepare_image_inputs() _lowercase : Optional[int] = processor(text=UpperCAmelCase_ ,images=UpperCAmelCase_ ) self.assertListEqual(list(inputs.keys() ) ,["""input_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(UpperCAmelCase_ ): processor() def lowerCamelCase__ ( self ): _lowercase : Dict = self.get_image_processor() _lowercase : List[Any] = self.get_tokenizer() _lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) _lowercase : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _lowercase : int = processor.batch_decode(UpperCAmelCase_ ) _lowercase : 
Tuple = tokenizer.batch_decode(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ): _lowercase : Optional[Any] = self.get_image_processor() _lowercase : List[Any] = self.get_tokenizer() _lowercase : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ ) _lowercase : Optional[Any] = """lower newer""" _lowercase : Any = self.prepare_image_inputs() _lowercase : Optional[int] = processor(text=UpperCAmelCase_ ,images=UpperCAmelCase_ ) self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
336
1
"""simple docstring""" from math import factorial UpperCAmelCase: dict[str, int] = {str(digit): factorial(digit) for digit in range(10)} def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise TypeError("""Parameter number must be int""" ) if number < 0: raise ValueError("""Parameter number must be greater than or equal to 0""" ) # Converts number in string to iterate on its digits and adds its factorial. return sum(DIGIT_FACTORIAL[digit] for digit in str(__UpperCAmelCase ) ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase = 60 , __UpperCAmelCase = 1000000 ): if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise TypeError("""Parameters chain_length and number_limit must be int""" ) if chain_length <= 0 or number_limit <= 0: raise ValueError( """Parameters chain_length and number_limit must be greater than 0""" ) # the counter for the chains with the exact desired length _lowercase : str = 0 # the cached sizes of the previous chains _lowercase : dict[int, int] = {} for start_chain_element in range(1 , __UpperCAmelCase ): # The temporary set will contain the elements of the chain _lowercase : Optional[int] = set() _lowercase : List[Any] = 0 # Stop computing the chain when you find a cached size, a repeating item or the # length is greater then the desired one. _lowercase : Optional[int] = start_chain_element while ( chain_element not in chain_sets_lengths and chain_element not in chain_set and chain_set_length <= chain_length ): chain_set.add(__UpperCAmelCase ) chain_set_length += 1 _lowercase : Optional[Any] = digit_factorial_sum(__UpperCAmelCase ) if chain_element in chain_sets_lengths: chain_set_length += chain_sets_lengths[chain_element] _lowercase : List[str] = chain_set_length # If chain contains the exact amount of elements increase the counter if chain_set_length == chain_length: chains_counter += 1 return chains_counter if __name__ == "__main__": import doctest doctest.testmod() print(F'{solution()}')
336
"""simple docstring""" import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int UpperCAmelCase: str = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class UpperCamelCase ( datasets.BuilderConfig ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[datasets.Features] = None def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , ): import pyspark def generate_fn(): _lowercase : List[Any] = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) ) for partition_id in partition_order: _lowercase : Optional[int] = df_with_partition_id.select("""*""" ).where(F"""part_id = {partition_id}""" ).drop("""part_id""" ) _lowercase : int = partition_df.collect() _lowercase : Dict = 0 for row in rows: yield F"""{partition_id}_{row_id}""", row.asDict() row_id += 1 return generate_fn class UpperCamelCase ( _BaseExamplesIterable ): """simple docstring""" def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_=None ,): _lowercase : Union[str, Any] = df _lowercase : List[str] = partition_order or range(self.df.rdd.getNumPartitions() ) _lowercase : Tuple = _generate_iterable_examples(self.df ,self.partition_order ) def __iter__( self ): yield from self.generate_examples_fn() def lowerCamelCase__ ( self ,UpperCAmelCase_ ): _lowercase : List[str] = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(UpperCAmelCase_ ) return SparkExamplesIterable(self.df ,partition_order=UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ): _lowercase : Union[str, Any] = self.split_shard_indices_by_worker(UpperCAmelCase_ ,UpperCAmelCase_ ) return SparkExamplesIterable(self.df ,partition_order=UpperCAmelCase_ ) @property def lowerCamelCase__ ( self ): return len(self.partition_order ) class UpperCamelCase ( datasets.DatasetBuilder ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = SparkConfig def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,): import pyspark _lowercase : List[Any] = pyspark.sql.SparkSession.builder.getOrCreate() _lowercase : List[Any] = df _lowercase : int = working_dir super().__init__( cache_dir=UpperCAmelCase_ ,config_name=str(self.df.semanticHash() ) ,**UpperCAmelCase_ ,) def lowerCamelCase__ ( self ): # Returns the path of the created file. def create_cache_and_write_probe(UpperCAmelCase_ ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(self._cache_dir ,exist_ok=UpperCAmelCase_ ) _lowercase : Union[str, Any] = os.path.join(self._cache_dir ,"""fs_test""" + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(UpperCAmelCase_ ,"""a""" ) return [probe_file] if self._spark.conf.get("""spark.master""" ,"""""" ).startswith("""local""" ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. 
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. if self._cache_dir: _lowercase : List[str] = ( self._spark.sparkContext.parallelize(range(1 ) ,1 ).mapPartitions(UpperCAmelCase_ ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( """When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" ) def lowerCamelCase__ ( self ): return datasets.DatasetInfo(features=self.config.features ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ): return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def lowerCamelCase__ ( self ,UpperCAmelCase_ ): import pyspark def get_arrow_batch_size(UpperCAmelCase_ ): for batch in it: yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} ) _lowercase : List[str] = self.df.count() _lowercase : List[str] = df_num_rows if df_num_rows <= 1_00 else 1_00 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. _lowercase : Union[str, Any] = ( self.df.limit(UpperCAmelCase_ ) .repartition(1 ) .mapInArrow(UpperCAmelCase_ ,"""batch_bytes: long""" ) .agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) ) .collect()[0] .sample_bytes / sample_num_rows ) _lowercase : List[Any] = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. _lowercase : int = min(UpperCAmelCase_ ,int(approx_total_size / max_shard_size ) ) _lowercase : List[Any] = self.df.repartition(UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,): import pyspark _lowercase : Union[str, Any] = ParquetWriter if file_format == """parquet""" else ArrowWriter _lowercase : List[Any] = os.path.join(self._working_dir ,os.path.basename(UpperCAmelCase_ ) ) if self._working_dir else fpath _lowercase : Any = file_format == """parquet""" # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. _lowercase : Union[str, Any] = self.config.features _lowercase : Optional[int] = self._writer_batch_size _lowercase : Optional[Any] = self._fs.storage_options def write_arrow(UpperCAmelCase_ ): # Within the same SparkContext, no two task attempts will share the same attempt ID. _lowercase : Any = pyspark.TaskContext().taskAttemptId() _lowercase : List[str] = next(UpperCAmelCase_ ,UpperCAmelCase_ ) if first_batch is None: # Some partitions might not receive any data. 
return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,) _lowercase : List[Any] = 0 _lowercase : int = writer_class( features=UpperCAmelCase_ ,path=working_fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,writer_batch_size=UpperCAmelCase_ ,storage_options=UpperCAmelCase_ ,embed_local_files=UpperCAmelCase_ ,) _lowercase : Optional[int] = pa.Table.from_batches([first_batch] ) writer.write_table(UpperCAmelCase_ ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: _lowercase , _lowercase : Optional[Any] = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,) shard_id += 1 _lowercase : Union[str, Any] = writer_class( features=writer._features ,path=working_fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,writer_batch_size=UpperCAmelCase_ ,storage_options=UpperCAmelCase_ ,embed_local_files=UpperCAmelCase_ ,) _lowercase : Dict = pa.Table.from_batches([batch] ) writer.write_table(UpperCAmelCase_ ) if writer._num_bytes > 0: _lowercase , _lowercase : Dict = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,) if working_fpath != fpath: for file in os.listdir(os.path.dirname(UpperCAmelCase_ ) ): _lowercase : Dict = os.path.join(os.path.dirname(UpperCAmelCase_ ) ,os.path.basename(UpperCAmelCase_ ) ) shutil.move(UpperCAmelCase_ ,UpperCAmelCase_ ) _lowercase : List[str] = ( self.df.mapInArrow(UpperCAmelCase_ ,"""task_id: long, num_examples: long, num_bytes: long""" ) .groupBy("""task_id""" ) .agg( pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) ,pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) ,pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) ,pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) ,) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = "arrow" ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,): self._validate_cache_dir() _lowercase : Tuple = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(UpperCAmelCase_ ) _lowercase : Optional[int] = not is_remote_filesystem(self._fs ) _lowercase : Dict = os.path.join if is_local else posixpath.join _lowercase : int = """-TTTTT-SSSSS-of-NNNNN""" _lowercase : Optional[Any] = f"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}""" _lowercase : Dict = path_join(self._output_dir ,UpperCAmelCase_ ) _lowercase : List[Any] = 0 _lowercase : Optional[Any] = 0 _lowercase : int = 0 _lowercase : Any = [] _lowercase : Any = [] for task_id, content in self._prepare_split_single(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ): ( ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ) : Tuple = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(UpperCAmelCase_ ) _lowercase : Optional[int] = total_num_examples _lowercase : List[Any] = total_num_bytes # should rename 
everything at the end logger.debug(f"""Renaming {total_shards} shards.""" ) if total_shards > 1: _lowercase : List[Any] = all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the SparkContext. _lowercase : Union[str, Any] = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,): rename( UpperCAmelCase_ ,fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,fpath.replace("""TTTTT-SSSSS""" ,f"""{global_shard_id:05d}""" ).replace("""NNNNN""" ,f"""{total_shards:05d}""" ) ,) _lowercase : Optional[Any] = [] _lowercase : List[str] = 0 for i in range(len(UpperCAmelCase_ ) ): _lowercase , _lowercase : List[str] = task_id_and_num_shards[i] for shard_id in range(UpperCAmelCase_ ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(UpperCAmelCase_ ,len(UpperCAmelCase_ ) ).map(lambda UpperCAmelCase_ : _rename_shard(*UpperCAmelCase_ ) ).collect() else: # don't use any pattern _lowercase : Tuple = 0 _lowercase : Optional[Any] = task_id_and_num_shards[0][0] self._rename( fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,fpath.replace(UpperCAmelCase_ ,"""""" ) ,) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,): return SparkExamplesIterable(self.df )
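A minimal sketch of how this builder is reached; `Dataset.from_spark` is the public entry point (assuming a `datasets` release that ships the Spark builder, and toy data):

from pyspark.sql import SparkSession
from datasets import Dataset

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([("a", 0), ("b", 1)], ["text", "label"])
ds = Dataset.from_spark(df)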
336
1
"""simple docstring""" import functools import gc import inspect import torch from .imports import is_npu_available, is_xpu_available def __SCREAMING_SNAKE_CASE ( *__UpperCAmelCase ): if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): _lowercase : Tuple = list(__UpperCAmelCase ) for i in range(len(__UpperCAmelCase ) ): _lowercase : Tuple = None gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() return objects def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase : List[Any] = [ """CUDA out of memory.""", # CUDA OOM """cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.""", # CUDNN SNAFU """DefaultCPUAllocator: can't allocate memory""", # CPU OOM ] if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(exception.args ) == 1: return any(err in exception.args[0] for err in _statements ) return False def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase = None , __UpperCAmelCase = 128 ): if function is None: return functools.partial(__UpperCAmelCase , starting_batch_size=__UpperCAmelCase ) _lowercase : Any = starting_batch_size def decorator(*__UpperCAmelCase , **__UpperCAmelCase ): nonlocal batch_size gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() _lowercase : Tuple = list(inspect.signature(__UpperCAmelCase ).parameters.keys() ) # Guard against user error if len(__UpperCAmelCase ) < (len(__UpperCAmelCase ) + 1): _lowercase : Union[str, Any] = """, """.join([F"""{arg}={value}""" for arg, value in zip(params[1:] , args[1:] )] ) raise TypeError( F"""Batch size was passed into `{function.__name__}` as the first argument when called.""" F"""Remove this as the decorator already does so: `{function.__name__}({arg_str})`""" ) while True: if batch_size == 0: raise RuntimeError("""No executable batch size found, reached zero.""" ) try: return function(__UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ) except Exception as e: if should_reduce_batch_size(__UpperCAmelCase ): gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() batch_size //= 2 else: raise return decorator
336
"""simple docstring""" import unittest from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin UpperCAmelCase: Optional[int] = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class UpperCamelCase ( snake_case , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = XLNetTokenizer SCREAMING_SNAKE_CASE_ : Dict = XLNetTokenizerFast SCREAMING_SNAKE_CASE_ : int = True SCREAMING_SNAKE_CASE_ : Union[str, Any] = True def lowerCamelCase__ ( self ): super().setUp() # We have a SentencePiece fixture for testing _lowercase : Dict = XLNetTokenizer(UpperCAmelCase_ ,keep_accents=UpperCAmelCase_ ) tokenizer.sanitize_special_tokens() tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase__ ( self ): _lowercase : Union[str, Any] = """<s>""" _lowercase : List[Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) ,UpperCAmelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ): _lowercase : str = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,"""<unk>""" ) self.assertEqual(vocab_keys[1] ,"""<s>""" ) self.assertEqual(vocab_keys[-1] ,"""<eod>""" ) self.assertEqual(len(UpperCAmelCase_ ) ,10_06 ) def lowerCamelCase__ ( self ): self.assertEqual(self.get_tokenizer().vocab_size ,10_00 ) def lowerCamelCase__ ( self ): _lowercase : int = XLNetTokenizer(UpperCAmelCase_ ,keep_accents=UpperCAmelCase_ ) _lowercase : Union[str, Any] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(UpperCAmelCase_ ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) ,[2_85, 46, 10, 1_70, 3_82] ) _lowercase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( UpperCAmelCase_ ,[ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] ,) _lowercase : List[Any] = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ ,[8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] ) _lowercase : Optional[Any] = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ ) self.assertListEqual( UpperCAmelCase_ ,[ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] ,) def lowerCamelCase__ ( self ): _lowercase : Dict = XLNetTokenizer(UpperCAmelCase_ ,do_lower_case=UpperCAmelCase_ ) _lowercase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( UpperCAmelCase_ ,[ SPIECE_UNDERLINE + """""", """i""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + 
"""in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """se""", """.""", ] ,) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""▁he""", """ll""", """o"""] ) def lowerCamelCase__ ( self ): _lowercase : int = XLNetTokenizer(UpperCAmelCase_ ,do_lower_case=UpperCAmelCase_ ) _lowercase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( UpperCAmelCase_ ,[ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """se""", """.""", ] ,) @slow def lowerCamelCase__ ( self ): _lowercase : Dict = XLNetTokenizer.from_pretrained("""xlnet-base-cased""" ) _lowercase : int = tokenizer.encode("""sequence builders""" ,add_special_tokens=UpperCAmelCase_ ) _lowercase : List[str] = tokenizer.encode("""multi-sequence build""" ,add_special_tokens=UpperCAmelCase_ ) _lowercase : Optional[int] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ ) _lowercase : Dict = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ ,UpperCAmelCase_ ) assert encoded_sentence == text + [4, 3] assert encoded_pair == text + [4] + text_a + [4, 3] @slow def lowerCamelCase__ ( self ): # fmt: off _lowercase : Union[str, Any] = {"""input_ids""": [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase_ ,model_name="""xlnet-base-cased""" ,revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" ,)
336
1
"""simple docstring""" def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ): if len(__UpperCAmelCase ) != len(__UpperCAmelCase ): raise ValueError("""String lengths must match!""" ) _lowercase : int = 0 for chara, chara in zip(__UpperCAmelCase , __UpperCAmelCase ): if chara != chara: count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
336
"""simple docstring""" def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): return " ".join(input_str.split()[::-1] ) if __name__ == "__main__": import doctest doctest.testmod()
336
1
"""simple docstring""" from __future__ import annotations import random import unittest from transformers import TransfoXLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLModel, ) class UpperCamelCase : """simple docstring""" def __init__( self ,UpperCAmelCase_ ,): _lowercase : Union[str, Any] = parent _lowercase : int = 13 _lowercase : Optional[int] = 7 _lowercase : str = 30 _lowercase : Optional[Any] = self.seq_length + self.mem_len _lowercase : Dict = 15 _lowercase : Union[str, Any] = True _lowercase : Optional[int] = True _lowercase : Optional[int] = 99 _lowercase : int = [10, 50, 80] _lowercase : List[str] = 32 _lowercase : str = 32 _lowercase : str = 4 _lowercase : Tuple = 8 _lowercase : List[str] = 1_28 _lowercase : List[Any] = 2 _lowercase : Union[str, Any] = 2 _lowercase : Optional[Any] = None _lowercase : Any = 1 _lowercase : List[str] = 0 _lowercase : Optional[int] = 3 _lowercase : Tuple = self.vocab_size - 1 _lowercase : str = 0.01 def lowerCamelCase__ ( self ): _lowercase : int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) _lowercase : int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) _lowercase : Any = None if self.use_labels: _lowercase : Any = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) _lowercase : Optional[int] = TransfoXLConfig( vocab_size=self.vocab_size ,mem_len=self.mem_len ,clamp_len=self.clamp_len ,cutoffs=self.cutoffs ,d_model=self.hidden_size ,d_embed=self.d_embed ,n_head=self.num_attention_heads ,d_head=self.d_head ,d_inner=self.d_inner ,div_val=self.div_val ,n_layer=self.num_hidden_layers ,eos_token_id=self.eos_token_id ,pad_token_id=self.vocab_size - 1 ,init_range=self.init_range ,num_labels=self.num_labels ,) return (config, input_ids_a, input_ids_a, lm_labels) def lowerCamelCase__ ( self ): random.seed(self.seed ) tf.random.set_seed(self.seed ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ): _lowercase : Union[str, Any] = TFTransfoXLModel(UpperCAmelCase_ ) _lowercase , _lowercase : List[Any] = model(UpperCAmelCase_ ).to_tuple() _lowercase : List[Any] = {"""input_ids""": input_ids_a, """mems""": mems_a} _lowercase , _lowercase : List[Any] = model(UpperCAmelCase_ ).to_tuple() self.parent.assertEqual(hidden_states_a.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(hidden_states_a.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,) self.parent.assertListEqual( [mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ): _lowercase : List[Any] = TFTransfoXLLMHeadModel(UpperCAmelCase_ ) _lowercase , _lowercase : str = model(UpperCAmelCase_ ).to_tuple() _lowercase : Any = {"""input_ids""": input_ids_a, """labels""": lm_labels} _lowercase , _lowercase : Optional[Any] = model(UpperCAmelCase_ ).to_tuple() _lowercase , _lowercase : str 
= model([input_ids_a, mems_a] ).to_tuple() _lowercase : List[str] = {"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels} _lowercase , _lowercase : Dict = model(UpperCAmelCase_ ).to_tuple() self.parent.assertEqual(lm_logits_a.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,) self.parent.assertEqual(lm_logits_a.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ): _lowercase : Optional[Any] = TFTransfoXLForSequenceClassification(UpperCAmelCase_ ) _lowercase : List[str] = model(UpperCAmelCase_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def lowerCamelCase__ ( self ): _lowercase : str = self.prepare_config_and_inputs() ((_lowercase) , (_lowercase) , (_lowercase) , (_lowercase)) : List[str] = config_and_inputs _lowercase : Union[str, Any] = {"""input_ids""": input_ids_a} return config, inputs_dict @require_tf class UpperCamelCase ( snake_case , snake_case , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = ( (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () ) SCREAMING_SNAKE_CASE_ : int = () if is_tf_available() else () SCREAMING_SNAKE_CASE_ : Any = ( { "feature-extraction": TFTransfoXLModel, "text-classification": TFTransfoXLForSequenceClassification, "text-generation": TFTransfoXLLMHeadModel, "zero-shot": TFTransfoXLForSequenceClassification, } if is_tf_available() else {} ) # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented SCREAMING_SNAKE_CASE_ : int = False SCREAMING_SNAKE_CASE_ : List[str] = False SCREAMING_SNAKE_CASE_ : Optional[int] = False SCREAMING_SNAKE_CASE_ : Tuple = False def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ): if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. 
return True return False def lowerCamelCase__ ( self ): _lowercase : Tuple = TFTransfoXLModelTester(self ) _lowercase : Optional[Any] = ConfigTester(self ,config_class=UpperCAmelCase_ ,d_embed=37 ) def lowerCamelCase__ ( self ): self.config_tester.run_common_tests() def lowerCamelCase__ ( self ): self.model_tester.set_seed() _lowercase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_model(*UpperCAmelCase_ ) def lowerCamelCase__ ( self ): self.model_tester.set_seed() _lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*UpperCAmelCase_ ) def lowerCamelCase__ ( self ): _lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*UpperCAmelCase_ ) def lowerCamelCase__ ( self ): _lowercase , _lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() _lowercase : str = [TFTransfoXLForSequenceClassification] for model_class in self.all_model_classes: _lowercase : Dict = model_class(UpperCAmelCase_ ) assert isinstance(model.get_input_embeddings() ,tf.keras.layers.Layer ) if model_class in list_other_models_with_output_ebd: _lowercase : Tuple = model.get_output_embeddings() assert isinstance(UpperCAmelCase_ ,tf.keras.layers.Layer ) _lowercase : Any = model.get_bias() assert name is None else: _lowercase : Any = model.get_output_embeddings() assert x is None _lowercase : Tuple = model.get_bias() assert name is None def lowerCamelCase__ ( self ): # TODO JP: Make TransfoXL XLA compliant pass @slow def lowerCamelCase__ ( self ): for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowercase : List[str] = TFTransfoXLModel.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) @unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" ) def lowerCamelCase__ ( self ): pass @require_tf class UpperCamelCase ( unittest.TestCase ): """simple docstring""" @unittest.skip("""Skip test until #12651 is resolved.""" ) @slow def lowerCamelCase__ ( self ): _lowercase : List[str] = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" ) # fmt: off _lowercase : Any = tf.convert_to_tensor([[33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0]] ,dtype=tf.intaa ) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . # Rasputin has a vision and denounces one of the men as a horse thief . Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . 
Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . <eod> </s> <eos> # fmt: off _lowercase : Any = [33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0,33,1,18_57,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,28,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,0] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( # except for Alexei and Maria ) are discovered. The voice of young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. # 1883 Western Siberia, a young Grigori Rasputin is asked by his father # and a group of men to perform magic. Rasputin has a vision and # denounces one of the men as a horse thief. Although his father initially # slaps him for making such an accusation, Rasputin watches as the man # is chased outside and beaten. Twenty years later, Rasputin sees a vision # of the Virgin Mary, prompting him to become a priest. # Rasputin quickly becomes famous, with people, even a bishop, begging for # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar # Nicholas II and his family were discovered. The voice of <unk> young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos> _lowercase : List[str] = model.generate(UpperCAmelCase_ ,max_length=2_00 ,do_sample=UpperCAmelCase_ ) self.assertListEqual(output_ids[0].numpy().tolist() ,UpperCAmelCase_ )
336
"""simple docstring""" import inspect import re from hashlib import shaaaa from typing import Dict, List from .arrow import arrow from .audiofolder import audiofolder from .csv import csv from .imagefolder import imagefolder from .json import json from .pandas import pandas from .parquet import parquet from .sql import sql # noqa F401 from .text import text def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): _lowercase : int = [] for line in lines: _lowercase : Dict = re.sub(R"""#.*""" , """""" , __UpperCAmelCase ) # remove comments if line: filtered_lines.append(__UpperCAmelCase ) _lowercase : Tuple = """\n""".join(__UpperCAmelCase ) # Make a hash from all this code _lowercase : Tuple = full_str.encode("""utf-8""" ) return shaaaa(__UpperCAmelCase ).hexdigest() # get importable module names and hash for caching UpperCAmelCase: Tuple = { """csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())), """json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())), """pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())), """parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())), """arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())), """text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())), """imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())), """audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())), } # Used to infer the module to use based on the data files extensions UpperCAmelCase: List[str] = { """.csv""": ("""csv""", {}), """.tsv""": ("""csv""", {"""sep""": """\t"""}), """.json""": ("""json""", {}), """.jsonl""": ("""json""", {}), """.parquet""": ("""parquet""", {}), """.arrow""": ("""arrow""", {}), """.txt""": ("""text""", {}), } _EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) UpperCAmelCase: Any = {"""imagefolder""", """audiofolder"""} # Used to filter data files based on extensions given a module name UpperCAmelCase: Dict[str, List[str]] = {} for _ext, (_module, _) in _EXTENSION_TO_MODULE.items(): _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext) _MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""") _MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
336
1
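# A hedged sketch of how the extension tables above are meant to be used,
# assuming the obfuscated top-level dict is restored to its original name
# _EXTENSION_TO_MODULE (the update calls above already use that name).
import os


def infer_module_for_data_file(filename):
    # ".csv" -> ("csv", {}), ".tsv" -> ("csv", {"sep": "\t"}), unknown -> (None, {})
    _, ext = os.path.splitext(filename)
    module_name, builder_kwargs = _EXTENSION_TO_MODULE.get(ext, (None, {}))
    return module_name, builder_kwargs


# infer_module_for_data_file("data/train.jsonl")  -> ("json", {})
# infer_module_for_data_file("images/cat.PNG")    -> ("imagefolder", {})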
"""simple docstring""" import inspect import unittest from transformers import ViTHybridConfig from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class UpperCamelCase : """simple docstring""" def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_=13 ,UpperCAmelCase_=64 ,UpperCAmelCase_=2 ,UpperCAmelCase_=3 ,UpperCAmelCase_=True ,UpperCAmelCase_=True ,UpperCAmelCase_=32 ,UpperCAmelCase_=5 ,UpperCAmelCase_=4 ,UpperCAmelCase_=37 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=10 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=[1, 16, 4, 4] ,UpperCAmelCase_=None ,): _lowercase : Union[str, Any] = parent _lowercase : Optional[Any] = batch_size _lowercase : Dict = image_size _lowercase : Union[str, Any] = patch_size _lowercase : str = num_channels _lowercase : Any = is_training _lowercase : Optional[Any] = use_labels _lowercase : List[Any] = hidden_size _lowercase : Union[str, Any] = num_hidden_layers _lowercase : Optional[int] = num_attention_heads _lowercase : List[Any] = intermediate_size _lowercase : Optional[int] = hidden_act _lowercase : Optional[Any] = hidden_dropout_prob _lowercase : Union[str, Any] = attention_probs_dropout_prob _lowercase : int = type_sequence_label_size _lowercase : Any = initializer_range _lowercase : Tuple = scope _lowercase : Optional[Any] = backbone_featmap_shape # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) # the number of patches is based on the feature map of the backbone, which by default uses an output stride # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size _lowercase : Tuple = (self.image_size // 32) ** 2 _lowercase : Union[str, Any] = num_patches + 1 def lowerCamelCase__ ( self ): _lowercase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowercase : Tuple = None if self.use_labels: _lowercase : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) _lowercase : Dict = self.get_config() return config, pixel_values, labels def lowerCamelCase__ ( self ): _lowercase : List[Any] = { """global_padding""": """same""", """layer_type""": """bottleneck""", """depths""": [3, 4, 9], """out_features""": ["""stage1""", """stage2""", """stage3"""], """embedding_dynamic_padding""": True, """hidden_sizes""": [4, 8, 16, 32], """num_groups""": 2, } return ViTHybridConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=UpperCAmelCase_ ,initializer_range=self.initializer_range 
,backbone_featmap_shape=self.backbone_featmap_shape ,backbone_config=UpperCAmelCase_ ,) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ): _lowercase : int = ViTHybridModel(config=UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() _lowercase : List[str] = model(UpperCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ): _lowercase : Dict = self.type_sequence_label_size _lowercase : Optional[int] = ViTHybridForImageClassification(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() _lowercase : Optional[Any] = model(UpperCAmelCase_ ,labels=UpperCAmelCase_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def lowerCamelCase__ ( self ): _lowercase : Optional[int] = self.prepare_config_and_inputs() _lowercase , _lowercase , _lowercase : Optional[int] = config_and_inputs _lowercase : Union[str, Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class UpperCamelCase ( snake_case , snake_case , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else () SCREAMING_SNAKE_CASE_ : Optional[int] = ( {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification} if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ : List[Any] = False SCREAMING_SNAKE_CASE_ : Tuple = False SCREAMING_SNAKE_CASE_ : Any = False def lowerCamelCase__ ( self ): _lowercase : Dict = ViTHybridModelTester(self ) _lowercase : str = ConfigTester(self ,config_class=UpperCAmelCase_ ,has_text_modality=UpperCAmelCase_ ,hidden_size=37 ) def lowerCamelCase__ ( self ): self.config_tester.run_common_tests() @unittest.skip(reason="""ViT does not use inputs_embeds""" ) def lowerCamelCase__ ( self ): pass def lowerCamelCase__ ( self ): _lowercase , _lowercase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowercase : int = model_class(UpperCAmelCase_ ) self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) ) _lowercase : Union[str, Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCAmelCase_ ,nn.Linear ) ) def lowerCamelCase__ ( self ): _lowercase , _lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowercase : List[str] = model_class(UpperCAmelCase_ ) _lowercase : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowercase : Optional[Any] = [*signature.parameters.keys()] _lowercase : Dict = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ): _lowercase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase_ ) def lowerCamelCase__ ( self ): _lowercase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_ ) def lowerCamelCase__ ( self ): _lowercase , _lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common() _lowercase : str = _config_zero_init(UpperCAmelCase_ ) for model_class in self.all_model_classes: _lowercase : Optional[Any] = model_class(config=UpperCAmelCase_ ) # Skip the check 
for the backbone for name, module in model.named_modules(): if module.__class__.__name__ == "ViTHybridPatchEmbeddings": _lowercase : Any = [f"""{name}.{key}""" for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() ,[0.0, 1.0] ,msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" ,) @slow def lowerCamelCase__ ( self ): for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowercase : Optional[Any] = ViTHybridModel.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) def __SCREAMING_SNAKE_CASE ( ): _lowercase : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class UpperCamelCase ( unittest.TestCase ): """simple docstring""" @cached_property def lowerCamelCase__ ( self ): return ( ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowerCamelCase__ ( self ): _lowercase : Union[str, Any] = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( UpperCAmelCase_ ) _lowercase : Union[str, Any] = self.default_image_processor _lowercase : Dict = prepare_img() _lowercase : Dict = image_processor(images=UpperCAmelCase_ ,return_tensors="""pt""" ).to(UpperCAmelCase_ ) # forward pass with torch.no_grad(): _lowercase : List[str] = model(**UpperCAmelCase_ ) # verify the logits _lowercase : Optional[int] = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape ,UpperCAmelCase_ ) _lowercase : str = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(UpperCAmelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,UpperCAmelCase_ ,atol=1E-4 ) ) @slow @require_accelerate def lowerCamelCase__ ( self ): _lowercase : Dict = ViTHybridImageProcessor.from_pretrained("""google/vit-hybrid-base-bit-384""" ) _lowercase : Union[str, Any] = ViTHybridForImageClassification.from_pretrained("""google/vit-hybrid-base-bit-384""" ,device_map="""auto""" ) _lowercase : Optional[Any] = prepare_img() _lowercase : Optional[Any] = image_processor(images=UpperCAmelCase_ ,return_tensors="""pt""" ) _lowercase : List[Any] = model(**UpperCAmelCase_ ) _lowercase : str = outputs.logits # model predicts one of the 1000 ImageNet classes _lowercase : Union[str, Any] = logits.argmax(-1 ).item() self.assertTrue(model.config.idalabel[predicted_class_idx] ,"""tabby, tabby cat""" )
336
"""simple docstring"""
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
    MaskedBertForMultipleChoice,
    MaskedBertForQuestionAnswering,
    MaskedBertForSequenceClassification,
    MaskedBertForTokenClassification,
    MaskedBertModel,
)
from .modules import *
336
1
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor class UpperCamelCase ( unittest.TestCase ): """simple docstring""" def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_=7 ,UpperCAmelCase_=3 ,UpperCAmelCase_=18 ,UpperCAmelCase_=30 ,UpperCAmelCase_=4_00 ,UpperCAmelCase_=True ,UpperCAmelCase_=None ,UpperCAmelCase_=True ,UpperCAmelCase_=None ,UpperCAmelCase_=True ,UpperCAmelCase_=[0.48145466, 0.4578275, 0.40821073] ,UpperCAmelCase_=[0.26862954, 0.26130258, 0.27577711] ,UpperCAmelCase_=True ,): _lowercase : Optional[Any] = size if size is not None else {"""height""": 2_24, """width""": 2_24} _lowercase : Any = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} _lowercase : int = parent _lowercase : Tuple = batch_size _lowercase : int = num_channels _lowercase : Dict = image_size _lowercase : Any = min_resolution _lowercase : List[str] = max_resolution _lowercase : Any = do_resize _lowercase : Optional[int] = size _lowercase : Dict = do_center_crop _lowercase : Optional[Any] = crop_size _lowercase : Tuple = do_normalize _lowercase : List[str] = image_mean _lowercase : str = image_std _lowercase : Any = do_convert_rgb def lowerCamelCase__ ( self ): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def lowerCamelCase__ ( self ,UpperCAmelCase_=False ,UpperCAmelCase_=False ,UpperCAmelCase_=False ): assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" if equal_resolution: _lowercase : Union[str, Any] = [] for i in range(self.batch_size ): image_inputs.append( np.random.randint( 2_55 ,size=(self.num_channels, self.max_resolution, self.max_resolution) ,dtype=np.uinta ) ) else: _lowercase : Optional[int] = [] for i in range(self.batch_size ): _lowercase , _lowercase : Optional[Any] = np.random.choice(np.arange(self.min_resolution ,self.max_resolution ) ,2 ) image_inputs.append(np.random.randint(2_55 ,size=(self.num_channels, width, height) ,dtype=np.uinta ) ) if not numpify and not torchify: # PIL expects the channel dimension as last dimension _lowercase : Optional[int] = [Image.fromarray(np.moveaxis(UpperCAmelCase_ ,0 ,-1 ) ) for x in image_inputs] if torchify: _lowercase : Union[str, Any] = [torch.from_numpy(UpperCAmelCase_ ) for x in image_inputs] return image_inputs @require_torch @require_vision class UpperCamelCase ( snake_case , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = ChineseCLIPImageProcessor if is_vision_available() else None def lowerCamelCase__ ( self ): _lowercase : int = ChineseCLIPImageProcessingTester(self ,do_center_crop=UpperCAmelCase_ ) @property def lowerCamelCase__ ( self ): return self.image_processor_tester.prepare_image_processor_dict() def lowerCamelCase__ ( self ): _lowercase : Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCAmelCase_ ,"""do_resize""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ ,"""size""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ 
,"""do_center_crop""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ ,"""center_crop""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ ,"""do_normalize""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ ,"""image_mean""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ ,"""image_std""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ ,"""do_convert_rgb""" ) ) def lowerCamelCase__ ( self ): _lowercase : Any = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{"""height""": 2_24, """width""": 2_24} ) self.assertEqual(image_processor.crop_size ,{"""height""": 18, """width""": 18} ) _lowercase : str = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 ) self.assertEqual(image_processor.size ,{"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size ,{"""height""": 84, """width""": 84} ) def lowerCamelCase__ ( self ): pass def lowerCamelCase__ ( self ): # Initialize image_processing _lowercase : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _lowercase : List[Any] = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ ,Image.Image ) # Test not batched input _lowercase : List[Any] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched _lowercase : Tuple = image_processing(UpperCAmelCase_ ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) def lowerCamelCase__ ( self ): # Initialize image_processing _lowercase : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _lowercase : Any = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCAmelCase_ ,numpify=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ ,np.ndarray ) # Test not batched input _lowercase : Optional[int] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched _lowercase : str = image_processing(UpperCAmelCase_ ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) def lowerCamelCase__ ( self ): # Initialize image_processing _lowercase : int = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _lowercase : Tuple = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCAmelCase_ ,torchify=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ ,torch.Tensor ) # Test not batched input _lowercase : int = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, 
self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched _lowercase : Any = image_processing(UpperCAmelCase_ ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) @require_torch @require_vision class UpperCamelCase ( snake_case , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = ChineseCLIPImageProcessor if is_vision_available() else None def lowerCamelCase__ ( self ): _lowercase : Any = ChineseCLIPImageProcessingTester(self ,num_channels=4 ,do_center_crop=UpperCAmelCase_ ) _lowercase : List[Any] = 3 @property def lowerCamelCase__ ( self ): return self.image_processor_tester.prepare_image_processor_dict() def lowerCamelCase__ ( self ): _lowercase : List[str] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCAmelCase_ ,"""do_resize""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ ,"""size""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ ,"""do_center_crop""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ ,"""center_crop""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ ,"""do_normalize""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ ,"""image_mean""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ ,"""image_std""" ) ) self.assertTrue(hasattr(UpperCAmelCase_ ,"""do_convert_rgb""" ) ) def lowerCamelCase__ ( self ): pass def lowerCamelCase__ ( self ): # Initialize image_processing _lowercase : int = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _lowercase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ ,Image.Image ) # Test not batched input _lowercase : str = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched _lowercase : List[Any] = image_processing(UpperCAmelCase_ ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,)
336
"""simple docstring"""
def generate_large_matrix():
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid):
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array):
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1

    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid):
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound

    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid):
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid):
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark():
    from timeit import timeit

    print("""Running benchmarks""")
    setup = (
        """from __main__ import count_negatives_binary_search, """
        """count_negatives_brute_force, count_negatives_brute_force_with_break, grid"""
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(F"""{func}(grid=grid)""", setup=setup, number=500)
        print(F"""{func}() took {time:0.4f} seconds""")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
336
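# Worked example for the helpers above: every row and column is sorted in
# decreasing order, so a per-row binary search for the first negative entry
# counts all negatives in O(m log n) instead of O(m * n).
# find_negative_index([4, 3, 2, -1])  -> 3   (index of the first negative)
# count_negatives_binary_search([[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]])  -> 8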
1
"""simple docstring"""
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k, window_size):
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("""invalid k value""")

    def __str__(self):
        return str(self.k)

    def detect(self, img_path):
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, corner_list = edge_detect.detect("""path_to_image""")
    cv2.imwrite("""detect.png""", color_img)
336
"""simple docstring"""
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(""".lock""") as lock:
        nltk.download("""punkt""", quiet=True)


def add_newline_to_end_of_each_sentence(x):
    x = re.sub("""<n>""", """""", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
336
1
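# Example for the sentence splitter above: pegasus' "<n>" markers are stripped,
# then nltk's punkt tokenizer re-joins the text with one sentence per line.
# add_newline_to_end_of_each_sentence("Hello there.<n> How are you?")
# -> "Hello there.\nHow are you?"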
"""simple docstring""" import inspect import warnings from typing import Any, Dict, Optional, Union from packaging import version def __SCREAMING_SNAKE_CASE ( *__UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase=True , __UpperCAmelCase=2 ): from .. import __version__ _lowercase : Tuple = take_from _lowercase : Union[str, Any] = () if not isinstance(args[0] , __UpperCAmelCase ): _lowercase : Optional[int] = (args,) for attribute, version_name, message in args: if version.parse(version.parse(__UpperCAmelCase ).base_version ) >= version.parse(__UpperCAmelCase ): raise ValueError( F"""The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'""" F""" version {__version__} is >= {version_name}""" ) _lowercase : List[str] = None if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and attribute in deprecated_kwargs: values += (deprecated_kwargs.pop(__UpperCAmelCase ),) _lowercase : Union[str, Any] = F"""The `{attribute}` argument is deprecated and will be removed in version {version_name}.""" elif hasattr(__UpperCAmelCase , __UpperCAmelCase ): values += (getattr(__UpperCAmelCase , __UpperCAmelCase ),) _lowercase : List[str] = F"""The `{attribute}` attribute is deprecated and will be removed in version {version_name}.""" elif deprecated_kwargs is None: _lowercase : int = F"""`{attribute}` is deprecated and will be removed in version {version_name}.""" if warning is not None: _lowercase : Dict = warning + """ """ if standard_warn else """""" warnings.warn(warning + message , __UpperCAmelCase , stacklevel=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) > 0: _lowercase : Tuple = inspect.getouterframes(inspect.currentframe() )[1] _lowercase : List[Any] = call_frame.filename _lowercase : Tuple = call_frame.lineno _lowercase : Optional[Any] = call_frame.function _lowercase , _lowercase : Union[str, Any] = next(iter(deprecated_kwargs.items() ) ) raise TypeError(F"""{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`""" ) if len(__UpperCAmelCase ) == 0: return elif len(__UpperCAmelCase ) == 1: return values[0] return values
336
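# A minimal usage sketch for the deprecation helper above, assuming it keeps
# its original diffusers name `deprecate`; the keyword "scale" is purely
# illustrative and not part of the original file.
def forward(sample, **kwargs):
    # Pops "scale" from kwargs (warning once) and returns its value, or None.
    scale = deprecate("""scale""", """1.0.0""", """Passing `scale` is deprecated.""", take_from=kwargs)
    if scale is not None:
        sample = sample * scale
    return sample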
"""simple docstring""" import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase=0.9_9_9 , __UpperCAmelCase="cosine" , ): if alpha_transform_type == "cosine": def alpha_bar_fn(__UpperCAmelCase ): return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(__UpperCAmelCase ): return math.exp(t * -1_2.0 ) else: raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) _lowercase : str = [] for i in range(__UpperCAmelCase ): _lowercase : Any = i / num_diffusion_timesteps _lowercase : int = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(__UpperCAmelCase ) / alpha_bar_fn(__UpperCAmelCase ) , __UpperCAmelCase ) ) return torch.tensor(__UpperCAmelCase , dtype=torch.floataa ) class UpperCamelCase ( snake_case , snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = [e.name for e in KarrasDiffusionSchedulers] SCREAMING_SNAKE_CASE_ : str = 2 @register_to_config def __init__( self ,UpperCAmelCase_ = 10_00 ,UpperCAmelCase_ = 0.00085 ,UpperCAmelCase_ = 0.012 ,UpperCAmelCase_ = "linear" ,UpperCAmelCase_ = None ,UpperCAmelCase_ = "epsilon" ,UpperCAmelCase_ = "linspace" ,UpperCAmelCase_ = 0 ,): if trained_betas is not None: _lowercase : str = torch.tensor(UpperCAmelCase_ ,dtype=torch.floataa ) elif beta_schedule == "linear": _lowercase : Optional[Any] = torch.linspace(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. _lowercase : Any = ( torch.linspace(beta_start**0.5 ,beta_end**0.5 ,UpperCAmelCase_ ,dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule _lowercase : Any = betas_for_alpha_bar(UpperCAmelCase_ ) else: raise NotImplementedError(f"""{beta_schedule} does is not implemented for {self.__class__}""" ) _lowercase : Tuple = 1.0 - self.betas _lowercase : Dict = torch.cumprod(self.alphas ,dim=0 ) # set all values self.set_timesteps(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_=None ): if schedule_timesteps is None: _lowercase : Optional[int] = self.timesteps _lowercase : Union[str, Any] = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter ) == 0: _lowercase : Optional[Any] = 1 if len(UpperCAmelCase_ ) > 1 else 0 else: _lowercase : Dict = timestep.cpu().item() if torch.is_tensor(UpperCAmelCase_ ) else timestep _lowercase : List[str] = self._index_counter[timestep_int] return indices[pos].item() @property def lowerCamelCase__ ( self ): # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,): _lowercase : str = self.index_for_timestep(UpperCAmelCase_ ) if self.state_in_first_order: _lowercase : Optional[Any] = self.sigmas[step_index] else: _lowercase : Dict = self.sigmas_interpol[step_index] _lowercase : Optional[int] = sample / ((sigma**2 + 1) ** 0.5) return sample def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,): _lowercase : List[str] = num_inference_steps _lowercase : Dict = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": _lowercase : Dict = np.linspace(0 ,num_train_timesteps - 1 ,UpperCAmelCase_ ,dtype=UpperCAmelCase_ )[::-1].copy() elif self.config.timestep_spacing == "leading": _lowercase : Union[str, Any] = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 _lowercase : str = (np.arange(0 ,UpperCAmelCase_ ) * step_ratio).round()[::-1].copy().astype(UpperCAmelCase_ ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": _lowercase : str = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 _lowercase : Optional[int] = (np.arange(UpperCAmelCase_ ,0 ,-step_ratio )).round().copy().astype(UpperCAmelCase_ ) timesteps -= 1 else: raise ValueError( f"""{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" ) _lowercase : Union[str, Any] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) _lowercase : Optional[Any] = torch.from_numpy(np.log(UpperCAmelCase_ ) ).to(UpperCAmelCase_ ) _lowercase : List[str] = np.interp(UpperCAmelCase_ ,np.arange(0 ,len(UpperCAmelCase_ ) ) ,UpperCAmelCase_ ) _lowercase : int = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) _lowercase : Any = torch.from_numpy(UpperCAmelCase_ ).to(device=UpperCAmelCase_ ) # interpolate sigmas _lowercase : List[str] = sigmas.log().lerp(sigmas.roll(1 ).log() ,0.5 ).exp() _lowercase : Union[str, Any] = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] ) _lowercase : Tuple = torch.cat( [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] ) if str(UpperCAmelCase_ ).startswith("""mps""" ): # mps does not support float64 _lowercase : Tuple = torch.from_numpy(UpperCAmelCase_ ).to(UpperCAmelCase_ ,dtype=torch.floataa ) else: _lowercase : str = torch.from_numpy(UpperCAmelCase_ ).to(UpperCAmelCase_ ) # interpolate timesteps _lowercase : int = self.sigma_to_t(UpperCAmelCase_ ).to(UpperCAmelCase_ ,dtype=timesteps.dtype ) _lowercase : Union[str, Any] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) ,dim=-1 ).flatten() _lowercase : str = torch.cat([timesteps[:1], interleaved_timesteps] ) _lowercase : List[Any] = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter _lowercase : Optional[Any] = defaultdict(UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ): # get log sigma _lowercase : Optional[Any] = sigma.log() # get distribution _lowercase : Optional[int] = log_sigma - self.log_sigmas[:, None] # get sigmas range _lowercase : Tuple = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 ) _lowercase : List[Any] = low_idx + 1 _lowercase : int = self.log_sigmas[low_idx] _lowercase : Any = self.log_sigmas[high_idx] # interpolate sigmas _lowercase : Any = (low - log_sigma) / (low - high) _lowercase : Dict = w.clamp(0 ,1 ) # transform interpolation to time range _lowercase : List[str] = (1 - w) * low_idx + w * high_idx _lowercase : Optional[int] = t.view(sigma.shape ) return t @property def lowerCamelCase__ ( self ): return self.sample is None def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = True ,): _lowercase : Optional[int] = self.index_for_timestep(UpperCAmelCase_ ) # advance index counter by 1 _lowercase : str = timestep.cpu().item() if torch.is_tensor(UpperCAmelCase_ ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: _lowercase : Any = self.sigmas[step_index] _lowercase : Any = self.sigmas_interpol[step_index + 1] _lowercase : Tuple = self.sigmas[step_index + 1] else: # 2nd order / KDPM2's method _lowercase : Union[str, Any] = self.sigmas[step_index - 1] _lowercase : int = self.sigmas_interpol[step_index] _lowercase : Tuple = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API _lowercase : Any = 0 _lowercase : int = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. 
compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": _lowercase : Union[str, Any] = sigma_hat if self.state_in_first_order else sigma_interpol _lowercase : Optional[Any] = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": _lowercase : str = sigma_hat if self.state_in_first_order else sigma_interpol _lowercase : List[str] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": raise NotImplementedError("""prediction_type not implemented yet: sample""" ) else: raise ValueError( f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order _lowercase : List[str] = (sample - pred_original_sample) / sigma_hat # 3. delta timestep _lowercase : Any = sigma_interpol - sigma_hat # store for 2nd order step _lowercase : List[Any] = sample else: # DPM-Solver-2 # 2. Convert to an ODE derivative for 2nd order _lowercase : Optional[Any] = (sample - pred_original_sample) / sigma_interpol # 3. delta timestep _lowercase : Optional[Any] = sigma_next - sigma_hat _lowercase : Any = self.sample _lowercase : Optional[int] = None _lowercase : str = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,): # Make sure sigmas and timesteps have the same device and dtype as original_samples _lowercase : int = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(UpperCAmelCase_ ): # mps does not support float64 _lowercase : str = self.timesteps.to(original_samples.device ,dtype=torch.floataa ) _lowercase : Any = timesteps.to(original_samples.device ,dtype=torch.floataa ) else: _lowercase : List[Any] = self.timesteps.to(original_samples.device ) _lowercase : Union[str, Any] = timesteps.to(original_samples.device ) _lowercase : List[Any] = [self.index_for_timestep(UpperCAmelCase_ ,UpperCAmelCase_ ) for t in timesteps] _lowercase : Optional[Any] = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): _lowercase : List[Any] = sigma.unsqueeze(-1 ) _lowercase : int = original_samples + noise * sigma return noisy_samples def __len__( self ): return self.config.num_train_timesteps
336
1
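# A hedged sampling-loop sketch for the KDPM2-style scheduler above, assuming
# the obfuscated method names map back to the usual diffusers API
# (set_timesteps / scale_model_input / step); `unet` stands in for any
# epsilon-predicting model with a (sample, timestep) call signature.
import torch


def sample_loop(unet, scheduler, shape, num_inference_steps=25, device="cpu"):
    scheduler.set_timesteps(num_inference_steps, device=device)
    sample = torch.randn(shape, device=device) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = unet(model_input, t)
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample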
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING UpperCAmelCase: List[str] = logging.get_logger(__name__) class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = "upernet" def __init__( self ,UpperCAmelCase_=None ,UpperCAmelCase_=5_12 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=[1, 2, 3, 6] ,UpperCAmelCase_=True ,UpperCAmelCase_=0.4 ,UpperCAmelCase_=3_84 ,UpperCAmelCase_=2_56 ,UpperCAmelCase_=1 ,UpperCAmelCase_=False ,UpperCAmelCase_=2_55 ,**UpperCAmelCase_ ,): super().__init__(**UpperCAmelCase_ ) if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" ) _lowercase : Union[str, Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ) elif isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ): _lowercase : Optional[Any] = backbone_config.get("""model_type""" ) _lowercase : str = CONFIG_MAPPING[backbone_model_type] _lowercase : Dict = config_class.from_dict(UpperCAmelCase_ ) _lowercase : int = backbone_config _lowercase : Optional[Any] = hidden_size _lowercase : Any = initializer_range _lowercase : Union[str, Any] = pool_scales _lowercase : Tuple = use_auxiliary_head _lowercase : Dict = auxiliary_loss_weight _lowercase : str = auxiliary_in_channels _lowercase : Any = auxiliary_channels _lowercase : Optional[int] = auxiliary_num_convs _lowercase : Optional[int] = auxiliary_concat_input _lowercase : int = loss_ignore_index def lowerCamelCase__ ( self ): _lowercase : List[Any] = copy.deepcopy(self.__dict__ ) _lowercase : Tuple = self.backbone_config.to_dict() _lowercase : Any = self.__class__.model_type return output
336
"""simple docstring"""
import pprint

import requests


API_ENDPOINT_URL = """https://zenquotes.io/api"""


def quote_of_the_day():
    return requests.get(API_ENDPOINT_URL + """/today""").json()


def random_quotes():
    return requests.get(API_ENDPOINT_URL + """/random""").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
336
1
"""simple docstring"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    """configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""],
    """processing_git""": ["""GitProcessor"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_git"""] = [
        """GIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """GitForCausalLM""",
        """GitModel""",
        """GitPreTrainedModel""",
        """GitVisionModel""",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
336
"""simple docstring"""
from __future__ import annotations

from typing import TypedDict


class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s):
    if not isinstance(s, str):
        raise TypeError("""The parameter s type must be str.""")

    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s):
    if not isinstance(s, str):
        raise TypeError("""The parameter s type must be str.""")
    if not s:
        raise ValueError("""The parameter s must not be empty.""")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string, idx_original_string):
    if not isinstance(bwt_string, str):
        raise TypeError("""The parameter bwt_string type must be str.""")
    if not bwt_string:
        raise ValueError("""The parameter bwt_string must not be empty.""")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            """The parameter idx_original_string type must be int or passive"""
            """ of cast to int."""
        )

    if idx_original_string < 0:
        raise ValueError("""The parameter idx_original_string must not be lower than 0.""")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            """The parameter idx_original_string must be lower than""" """ len(bwt_string)."""
        )

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]


if __name__ == "__main__":
    entry_msg = """Provide a string that I will generate its BWT transform: """
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        F"Burrows Wheeler transform for string '{s}' results "
        F"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""])
    print(
        F"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        F"we get original string '{original_string}'"
    )
336
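# Worked example for the Burrows-Wheeler helpers above: sorting all rotations
# of "^BANANA" and keeping each rotation's last character groups repeated
# letters together, which is what makes the transform compression-friendly.
# result = bwt_transform("^BANANA")
# result["bwt_string"]          -> "BNN^AAA"
# result["idx_original_string"] -> 6
# reverse_bwt("BNN^AAA", 6)     -> "^BANANA"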
1
"""simple docstring"""
from __future__ import annotations


ELECTRON_CHARGE = 1.6021e-19  # units = C


def electrical_conductivity(conductivity, electron_conc, mobility):
    if (conductivity, electron_conc, mobility).count(0) != 1:
        raise ValueError("""You cannot supply more or less than 2 values""")
    elif conductivity < 0:
        raise ValueError("""Conductivity cannot be negative""")
    elif electron_conc < 0:
        raise ValueError("""Electron concentration cannot be negative""")
    elif mobility < 0:
        raise ValueError("""mobility cannot be negative""")
    elif conductivity == 0:
        return (
            "conductivity",
            mobility * electron_conc * ELECTRON_CHARGE,
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            conductivity / (mobility * ELECTRON_CHARGE),
        )
    else:
        return (
            "mobility",
            conductivity / (electron_conc * ELECTRON_CHARGE),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
336
"""simple docstring"""
from __future__ import annotations

from itertools import permutations
from random import randint
from timeit import repeat


def make_dataset():
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr, target):
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr, target):
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times():
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(F'The time for naive implementation is {times[0]}.')
    print(F'The time for optimized implementation is {times[1]}.')
336
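# Worked example for the two solvers above: both return the matching triplet
# in sorted order, or (0, 0, 0) when no triplet reaches the target.
# triplet_sum1([13, 29, 7, 23, 5], 35)  -> (5, 7, 23)   # O(n^3) permutations
# triplet_sum2([13, 29, 7, 23, 5], 35)  -> (5, 7, 23)   # O(n^2) two pointers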
1
"""simple docstring"""
import requests


giphy_api_key = """YOUR API KEY"""


def get_gifs(query, api_key=giphy_api_key):
    formatted_query = """+""".join(query.split())
    url = F"""https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"""
    gifs = requests.get(url).json()["""data"""]
    return [gif["url"] for gif in gifs]


if __name__ == "__main__":
    print("""\n""".join(get_gifs("""space ship""")))
336
"""simple docstring""" import os from typing import List, Optional, Union from ...image_processing_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType from ..auto import AutoTokenizer class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = ["image_processor", "tokenizer"] SCREAMING_SNAKE_CASE_ : Optional[int] = "BlipImageProcessor" SCREAMING_SNAKE_CASE_ : Tuple = "AutoTokenizer" def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ): super().__init__(UpperCAmelCase_ ,UpperCAmelCase_ ) # add QFormer tokenizer _lowercase : Optional[int] = qformer_tokenizer def __call__( self ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = True ,UpperCAmelCase_ = False ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = 0 ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,): if images is None and text is None: raise ValueError("""You have to specify at least images or text.""" ) _lowercase : List[Any] = BatchFeature() if text is not None: _lowercase : List[str] = self.tokenizer( text=UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,max_length=UpperCAmelCase_ ,stride=UpperCAmelCase_ ,pad_to_multiple_of=UpperCAmelCase_ ,return_attention_mask=UpperCAmelCase_ ,return_overflowing_tokens=UpperCAmelCase_ ,return_special_tokens_mask=UpperCAmelCase_ ,return_offsets_mapping=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ ,return_length=UpperCAmelCase_ ,verbose=UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ,**UpperCAmelCase_ ,) encoding.update(UpperCAmelCase_ ) _lowercase : Dict = self.qformer_tokenizer( text=UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,max_length=UpperCAmelCase_ ,stride=UpperCAmelCase_ ,pad_to_multiple_of=UpperCAmelCase_ ,return_attention_mask=UpperCAmelCase_ ,return_overflowing_tokens=UpperCAmelCase_ ,return_special_tokens_mask=UpperCAmelCase_ ,return_offsets_mapping=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ ,return_length=UpperCAmelCase_ ,verbose=UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ,**UpperCAmelCase_ ,) _lowercase : str = qformer_text_encoding.pop("""input_ids""" ) _lowercase : int = qformer_text_encoding.pop("""attention_mask""" ) if images is not None: _lowercase : Optional[Any] = self.image_processor(UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ) encoding.update(UpperCAmelCase_ ) return encoding def lowerCamelCase__ ( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): return self.tokenizer.batch_decode(*UpperCAmelCase_ ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ): return self.tokenizer.decode(*UpperCAmelCase_ ,**UpperCAmelCase_ ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def lowerCamelCase__ ( self ): _lowercase : Union[str, Any] = self.tokenizer.model_input_names _lowercase : int = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ,**UpperCAmelCase_ ): if 
os.path.isfile(UpperCAmelCase_ ): raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" ) os.makedirs(UpperCAmelCase_ ,exist_ok=UpperCAmelCase_ ) _lowercase : Any = os.path.join(UpperCAmelCase_ ,"""qformer_tokenizer""" ) self.qformer_tokenizer.save_pretrained(UpperCAmelCase_ ) return super().save_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ ) @classmethod def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ): _lowercase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCAmelCase_ ,subfolder="""qformer_tokenizer""" ) _lowercase : Any = cls._get_arguments_from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ ) args.append(UpperCAmelCase_ ) return cls(*UpperCAmelCase_ )
336
1
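The processor in the context snippet above runs one piece of text through two tokenizers and renames the Q-Former keys before merging everything into a single encoding. A stripped-down sketch of that merge pattern follows; the toy tokenizer is a stand-in for the real classes, not the actual transformers API.

def merge_encodings(text, tokenizer, qformer_tokenizer):
    # Encode the same text twice and keep both results in one mapping,
    # prefixing the second tokenizer's keys so they do not collide.
    encoding = dict(tokenizer(text))
    qformer_encoding = dict(qformer_tokenizer(text))
    encoding["qformer_input_ids"] = qformer_encoding.pop("input_ids")
    encoding["qformer_attention_mask"] = qformer_encoding.pop("attention_mask")
    return encoding

def toy_tokenizer(text):
    # Stand-in tokenizer: one id per character, every position attended.
    return {"input_ids": [ord(c) for c in text], "attention_mask": [1] * len(text)}

print(merge_encodings("hi", toy_tokenizer, toy_tokenizer))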
"""simple docstring""" # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import sys import transformers UpperCAmelCase: Tuple = """3""" print("""Python version:""", sys.version) print("""transformers version:""", transformers.__version__) try: import torch print("""Torch version:""", torch.__version__) print("""Cuda available:""", torch.cuda.is_available()) print("""Cuda version:""", torch.version.cuda) print("""CuDNN version:""", torch.backends.cudnn.version()) print("""Number of GPUs available:""", torch.cuda.device_count()) print("""NCCL version:""", torch.cuda.nccl.version()) except ImportError: print("""Torch version:""", None) try: import deepspeed print("""DeepSpeed version:""", deepspeed.__version__) except ImportError: print("""DeepSpeed version:""", None) try: import tensorflow as tf print("""TensorFlow version:""", tf.__version__) print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU"""))) print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU"""))) except ImportError: print("""TensorFlow version:""", None)
336
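The environment dump above is print-based; when the same information needs to be reused, for bug reports or logging, collecting it into a dict is a common variant. A small sketch, with the package list as an arbitrary example:

import importlib
import sys

def collect_versions(packages):
    # Import each package if available and record its __version__, or None if missing.
    versions = {"python": sys.version.split()[0]}
    for name in packages:
        try:
            module = importlib.import_module(name)
            versions[name] = getattr(module, "__version__", "unknown")
        except ImportError:
            versions[name] = None
    return versions

print(collect_versions(["torch", "tensorflow", "deepspeed"]))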
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging UpperCAmelCase: Tuple = logging.get_logger(__name__) UpperCAmelCase: List[Any] = { """CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": ( """https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json""" ), # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer } class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = "trajectory_transformer" SCREAMING_SNAKE_CASE_ : Optional[int] = ["past_key_values"] SCREAMING_SNAKE_CASE_ : Tuple = { "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self ,UpperCAmelCase_=1_00 ,UpperCAmelCase_=5 ,UpperCAmelCase_=1 ,UpperCAmelCase_=1 ,UpperCAmelCase_=2_49 ,UpperCAmelCase_=6 ,UpperCAmelCase_=17 ,UpperCAmelCase_=25 ,UpperCAmelCase_=4 ,UpperCAmelCase_=4 ,UpperCAmelCase_=1_28 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.0006 ,UpperCAmelCase_=5_12 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=1E-12 ,UpperCAmelCase_=1 ,UpperCAmelCase_=True ,UpperCAmelCase_=1 ,UpperCAmelCase_=5_02_56 ,UpperCAmelCase_=5_02_56 ,**UpperCAmelCase_ ,): _lowercase : Dict = vocab_size _lowercase : List[str] = action_weight _lowercase : int = reward_weight _lowercase : List[Any] = value_weight _lowercase : List[str] = max_position_embeddings _lowercase : Any = block_size _lowercase : Any = action_dim _lowercase : List[str] = observation_dim _lowercase : Union[str, Any] = transition_dim _lowercase : str = learning_rate _lowercase : Tuple = n_layer _lowercase : Optional[int] = n_head _lowercase : List[str] = n_embd _lowercase : List[str] = embd_pdrop _lowercase : Optional[Any] = attn_pdrop _lowercase : List[Any] = resid_pdrop _lowercase : str = initializer_range _lowercase : Optional[Any] = layer_norm_eps _lowercase : List[Any] = kaiming_initializer_range _lowercase : List[Any] = use_cache super().__init__(pad_token_id=UpperCAmelCase_ ,bos_token_id=UpperCAmelCase_ ,eos_token_id=UpperCAmelCase_ ,**UpperCAmelCase_ )
336
1
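Configuration classes like the trajectory-transformer one above inherit to_dict and from_dict from PretrainedConfig, so they round-trip through plain dicts. A sketch of that round trip, assuming a transformers build that still exposes this (since deprecated) model:

from transformers import TrajectoryTransformerConfig

# Override one field, serialize, and rebuild; the restored config keeps the override.
config = TrajectoryTransformerConfig(n_layer=2)
restored = TrajectoryTransformerConfig.from_dict(config.to_dict())
assert restored.n_layer == 2
assert restored.model_type == "trajectory_transformer"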
"""simple docstring""" import asyncio import os import re import sys import tempfile import unittest from contextlib import contextmanager from copy import deepcopy from distutils.util import strtobool from enum import Enum from importlib.util import find_spec from pathlib import Path from unittest.mock import patch import pyarrow as pa import pytest import requests from packaging import version from datasets import config if config.PY_VERSION < version.parse("""3.8"""): import importlib_metadata else: import importlib.metadata as importlib_metadata def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase=False ): try: _lowercase : Union[str, Any] = os.environ[key] except KeyError: # KEY isn't set, default to `default`. _lowercase : Tuple = default else: # KEY is set, convert it to True or False. try: _lowercase : Optional[int] = strtobool(__UpperCAmelCase ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(F"""If set, {key} must be yes or no.""" ) return _value UpperCAmelCase: Union[str, Any] = parse_flag_from_env("""RUN_SLOW""", default=False) UpperCAmelCase: Dict = parse_flag_from_env("""RUN_REMOTE""", default=False) UpperCAmelCase: Tuple = parse_flag_from_env("""RUN_LOCAL""", default=True) UpperCAmelCase: Tuple = parse_flag_from_env("""RUN_PACKAGED""", default=True) # Compression UpperCAmelCase: Union[str, Any] = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="""test requires lz4""") UpperCAmelCase: List[Any] = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="""test requires py7zr""") UpperCAmelCase: Optional[int] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="""test requires zstandard""") # Audio UpperCAmelCase: Union[str, Any] = pytest.mark.skipif( # On Windows and OS X, soundfile installs sndfile find_spec("""soundfile""") is None or version.parse(importlib_metadata.version("""soundfile""")) < version.parse("""0.12.0"""), reason="""test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; """, ) # Beam UpperCAmelCase: str = pytest.mark.skipif( not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("""0.3.2"""), reason="""test requires apache-beam and a compatible dill version""", ) # Dill-cloudpickle compatibility UpperCAmelCase: Tuple = pytest.mark.skipif( config.DILL_VERSION <= version.parse("""0.3.2"""), reason="""test requires dill>0.3.2 for cloudpickle compatibility""", ) # Windows UpperCAmelCase: Optional[int] = pytest.mark.skipif( sys.platform == """win32""", reason="""test should not be run on Windows""", ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): try: import faiss # noqa except ImportError: _lowercase : List[Any] = unittest.skip("""test requires faiss""" )(__UpperCAmelCase ) return test_case def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): try: import regex # noqa except ImportError: _lowercase : int = unittest.skip("""test requires regex""" )(__UpperCAmelCase ) return test_case def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): try: import elasticsearch # noqa except ImportError: _lowercase : int = unittest.skip("""test requires elasticsearch""" )(__UpperCAmelCase ) return test_case def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): try: import sqlalchemy # noqa except ImportError: _lowercase : Dict = unittest.skip("""test requires sqlalchemy""" )(__UpperCAmelCase ) return test_case def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): if not config.TORCH_AVAILABLE: _lowercase : Optional[int] = unittest.skip("""test requires PyTorch""" )(__UpperCAmelCase ) return 
test_case def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): if not config.TF_AVAILABLE: _lowercase : Any = unittest.skip("""test requires TensorFlow""" )(__UpperCAmelCase ) return test_case def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): if not config.JAX_AVAILABLE: _lowercase : str = unittest.skip("""test requires JAX""" )(__UpperCAmelCase ) return test_case def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): if not config.PIL_AVAILABLE: _lowercase : str = unittest.skip("""test requires Pillow""" )(__UpperCAmelCase ) return test_case def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): try: import transformers # noqa F401 except ImportError: return unittest.skip("""test requires transformers""" )(__UpperCAmelCase ) else: return test_case def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): try: import tiktoken # noqa F401 except ImportError: return unittest.skip("""test requires tiktoken""" )(__UpperCAmelCase ) else: return test_case def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): try: import spacy # noqa F401 except ImportError: return unittest.skip("""test requires spacy""" )(__UpperCAmelCase ) else: return test_case def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): def _require_spacy_model(__UpperCAmelCase ): try: import spacy # noqa F401 spacy.load(__UpperCAmelCase ) except ImportError: return unittest.skip("""test requires spacy""" )(__UpperCAmelCase ) except OSError: return unittest.skip("""test requires spacy model '{}'""".format(__UpperCAmelCase ) )(__UpperCAmelCase ) else: return test_case return _require_spacy_model def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): try: import pyspark # noqa F401 except ImportError: return unittest.skip("""test requires pyspark""" )(__UpperCAmelCase ) else: return test_case def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): try: import joblibspark # noqa F401 except ImportError: return unittest.skip("""test requires joblibspark""" )(__UpperCAmelCase ) else: return test_case def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): if not _run_slow_tests or _run_slow_tests == 0: _lowercase : Any = unittest.skip("""test is slow""" )(__UpperCAmelCase ) return test_case def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): if not _run_local_tests or _run_local_tests == 0: _lowercase : Optional[int] = unittest.skip("""test is local""" )(__UpperCAmelCase ) return test_case def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): if not _run_packaged_tests or _run_packaged_tests == 0: _lowercase : List[Any] = unittest.skip("""test is packaged""" )(__UpperCAmelCase ) return test_case def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): if not _run_remote_tests or _run_remote_tests == 0: _lowercase : Optional[Any] = unittest.skip("""test requires remote""" )(__UpperCAmelCase ) return test_case def __SCREAMING_SNAKE_CASE ( *__UpperCAmelCase ): def decorate(cls ): for name, fn in cls.__dict__.items(): if callable(__UpperCAmelCase ) and name.startswith("""test""" ): for decorator in decorators: _lowercase : Optional[int] = decorator(__UpperCAmelCase ) setattr(cls , __UpperCAmelCase , __UpperCAmelCase ) return cls return decorate class UpperCamelCase ( snake_case ): """simple docstring""" pass class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = 0 SCREAMING_SNAKE_CASE_ : List[str] = 1 SCREAMING_SNAKE_CASE_ : Optional[int] = 2 @contextmanager def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase=OfflineSimulationMode.CONNECTION_FAILS , __UpperCAmelCase=1E-1_6 ): _lowercase : List[str] = requests.Session().request def timeout_request(__UpperCAmelCase , __UpperCAmelCase , 
__UpperCAmelCase , **__UpperCAmelCase ): # Change the url to an invalid url so that the connection hangs _lowercase : str = """https://10.255.255.1""" if kwargs.get("""timeout""" ) is None: raise RequestWouldHangIndefinitelyError( F"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" ) _lowercase : Dict = timeout try: return online_request(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) except Exception as e: # The following changes in the error are just here to make the offline timeout error prettier _lowercase : Optional[Any] = url _lowercase : Dict = e.args[0] _lowercase : Optional[Any] = (max_retry_error.args[0].replace("""10.255.255.1""" , F"""OfflineMock[{url}]""" ),) _lowercase : Union[str, Any] = (max_retry_error,) raise def raise_connection_error(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ): raise requests.ConnectionError("""Offline mode is enabled.""" , request=__UpperCAmelCase ) if mode is OfflineSimulationMode.CONNECTION_FAILS: with patch("""requests.Session.send""" , __UpperCAmelCase ): yield elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT: # inspired from https://stackoverflow.com/a/904609 with patch("""requests.Session.request""" , __UpperCAmelCase ): yield elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: with patch("""datasets.config.HF_DATASETS_OFFLINE""" , __UpperCAmelCase ): yield else: raise ValueError("""Please use a value from the OfflineSimulationMode enum.""" ) @contextmanager def __SCREAMING_SNAKE_CASE ( *__UpperCAmelCase , **__UpperCAmelCase ): _lowercase : Optional[int] = str(Path().resolve() ) with tempfile.TemporaryDirectory(*__UpperCAmelCase , **__UpperCAmelCase ) as tmp_dir: try: os.chdir(__UpperCAmelCase ) yield finally: os.chdir(__UpperCAmelCase ) @contextmanager def __SCREAMING_SNAKE_CASE ( ): import gc gc.collect() _lowercase : str = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase." @contextmanager def __SCREAMING_SNAKE_CASE ( ): import gc gc.collect() _lowercase : Tuple = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase." 
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ): return deepcopy(__UpperCAmelCase ).integers(0 , 100 , 10 ).tolist() == deepcopy(__UpperCAmelCase ).integers(0 , 100 , 10 ).tolist() def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): import decorator from requests.exceptions import HTTPError def _wrapper(__UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ): try: return func(*__UpperCAmelCase , **__UpperCAmelCase ) except HTTPError as err: if str(__UpperCAmelCase ).startswith("""500""" ) or str(__UpperCAmelCase ).startswith("""502""" ): pytest.xfail(str(__UpperCAmelCase ) ) raise err return decorator.decorator(_wrapper , __UpperCAmelCase ) class UpperCamelCase : """simple docstring""" def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ): _lowercase : Optional[Any] = returncode _lowercase : int = stdout _lowercase : Tuple = stderr async def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ): while True: _lowercase : Tuple = await stream.readline() if line: callback(__UpperCAmelCase ) else: break async def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=False , __UpperCAmelCase=False ): if echo: print("""\nRunning: """ , """ """.join(__UpperCAmelCase ) ) _lowercase : Dict = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=__UpperCAmelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__UpperCAmelCase , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) _lowercase : str = [] _lowercase : str = [] def tee(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="" ): _lowercase : List[Any] = line.decode("""utf-8""" ).rstrip() sink.append(__UpperCAmelCase ) if not quiet: print(__UpperCAmelCase , __UpperCAmelCase , file=__UpperCAmelCase ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ _read_stream(p.stdout , lambda __UpperCAmelCase : tee(__UpperCAmelCase , __UpperCAmelCase , sys.stdout , label="""stdout:""" ) ), _read_stream(p.stderr , lambda __UpperCAmelCase : tee(__UpperCAmelCase , __UpperCAmelCase , sys.stderr , label="""stderr:""" ) ), ] , timeout=__UpperCAmelCase , ) return _RunOutput(await p.wait() , __UpperCAmelCase , __UpperCAmelCase ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=180 , __UpperCAmelCase=False , __UpperCAmelCase=True ): _lowercase : List[Any] = asyncio.get_event_loop() _lowercase : int = loop.run_until_complete( _stream_subprocess(__UpperCAmelCase , env=__UpperCAmelCase , stdin=__UpperCAmelCase , timeout=__UpperCAmelCase , quiet=__UpperCAmelCase , echo=__UpperCAmelCase ) ) _lowercase : str = """ """.join(__UpperCAmelCase ) if result.returncode > 0: _lowercase : Optional[int] = """\n""".join(result.stderr ) raise RuntimeError( F"""'{cmd_str}' failed with returncode {result.returncode}\n\n""" F"""The combined stderr from workers follows:\n{stderr}""" ) # check that the subprocess actually did run and produced some output, should the test rely on # the remote side to do the testing if not result.stdout and not result.stderr: raise RuntimeError(F"""'{cmd_str}' produced no output.""" ) return result def __SCREAMING_SNAKE_CASE ( ): _lowercase : List[str] = os.environ.get("""PYTEST_XDIST_WORKER""" , """gw0""" ) _lowercase : Any = re.sub(R"""^gw""" , """""" , __UpperCAmelCase , 0 , re.M ) return int(__UpperCAmelCase ) def __SCREAMING_SNAKE_CASE ( ): _lowercase : int = 29500 _lowercase : Optional[int] = pytest_xdist_worker_id() return port + uniq_delta
336
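The testing helpers above combine two small patterns: parsing yes/no environment flags and turning them into unittest skip decorators. A self-contained sketch of that combination; RUN_SLOW mirrors the flag used above, and the test body is a placeholder.

import os
import unittest

def parse_flag_from_env(key, default=False):
    # Treat the variable as a yes/no switch, falling back to the default when unset.
    value = os.environ.get(key)
    if value is None:
        return default
    return value.lower() in ("1", "true", "yes")

RUN_SLOW = parse_flag_from_env("RUN_SLOW", default=False)

def slow(test_case):
    # Skip the decorated test unless RUN_SLOW was exported as a truthy value.
    return unittest.skipUnless(RUN_SLOW, "test is slow")(test_case)

class ExampleTest(unittest.TestCase):
    @slow
    def test_expensive_path(self):
        self.assertTrue(True)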
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING UpperCAmelCase: Any = logging.get_logger(__name__) UpperCAmelCase: List[str] = { """Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""", } class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = "instructblip_vision_model" def __init__( self ,UpperCAmelCase_=14_08 ,UpperCAmelCase_=61_44 ,UpperCAmelCase_=39 ,UpperCAmelCase_=16 ,UpperCAmelCase_=2_24 ,UpperCAmelCase_=14 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=1E-6 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=1E-10 ,UpperCAmelCase_=True ,**UpperCAmelCase_ ,): super().__init__(**UpperCAmelCase_ ) _lowercase : Optional[Any] = hidden_size _lowercase : Tuple = intermediate_size _lowercase : List[Any] = num_hidden_layers _lowercase : Tuple = num_attention_heads _lowercase : Optional[Any] = patch_size _lowercase : Optional[Any] = image_size _lowercase : Union[str, Any] = initializer_range _lowercase : Optional[Any] = attention_dropout _lowercase : List[Any] = layer_norm_eps _lowercase : Optional[int] = hidden_act _lowercase : Tuple = qkv_bias @classmethod def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ): cls._set_token_in_kwargs(UpperCAmelCase_ ) _lowercase , _lowercase : List[Any] = cls.get_config_dict(UpperCAmelCase_ ,**UpperCAmelCase_ ) # get the vision config dict if we are loading from InstructBlipConfig if config_dict.get("""model_type""" ) == "instructblip": _lowercase : int = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(UpperCAmelCase_ ,**UpperCAmelCase_ ) class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = "instructblip_qformer" def __init__( self ,UpperCAmelCase_=3_05_22 ,UpperCAmelCase_=7_68 ,UpperCAmelCase_=12 ,UpperCAmelCase_=12 ,UpperCAmelCase_=30_72 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=5_12 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=1E-12 ,UpperCAmelCase_=0 ,UpperCAmelCase_="absolute" ,UpperCAmelCase_=2 ,UpperCAmelCase_=14_08 ,**UpperCAmelCase_ ,): super().__init__(pad_token_id=UpperCAmelCase_ ,**UpperCAmelCase_ ) _lowercase : List[Any] = vocab_size _lowercase : List[Any] = hidden_size _lowercase : str = num_hidden_layers _lowercase : List[str] = num_attention_heads _lowercase : Optional[Any] = hidden_act _lowercase : int = intermediate_size _lowercase : Union[str, Any] = hidden_dropout_prob _lowercase : Optional[Any] = attention_probs_dropout_prob _lowercase : List[Any] = max_position_embeddings _lowercase : Tuple = initializer_range _lowercase : Optional[int] = layer_norm_eps _lowercase : Any = position_embedding_type _lowercase : Dict = cross_attention_frequency _lowercase : Optional[Any] = encoder_hidden_size @classmethod def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ): cls._set_token_in_kwargs(UpperCAmelCase_ ) _lowercase , _lowercase : Dict = cls.get_config_dict(UpperCAmelCase_ ,**UpperCAmelCase_ ) # get the qformer config dict if we are loading from InstructBlipConfig if config_dict.get("""model_type""" ) == "instructblip": _lowercase : str = config_dict["""qformer_config"""] if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(UpperCAmelCase_ ,**UpperCAmelCase_ ) class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = "instructblip" SCREAMING_SNAKE_CASE_ : List[str] = True def __init__( self ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=32 ,**UpperCAmelCase_ ): super().__init__(**UpperCAmelCase_ ) if vision_config is None: _lowercase : str = {} logger.info("""vision_config is None. initializing the InstructBlipVisionConfig with default values.""" ) if qformer_config is None: _lowercase : Any = {} logger.info("""qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.""" ) if text_config is None: _lowercase : Optional[int] = {} logger.info("""text_config is None. 
Initializing the text config with default values (`OPTConfig`).""" ) _lowercase : int = InstructBlipVisionConfig(**UpperCAmelCase_ ) _lowercase : Optional[int] = InstructBlipQFormerConfig(**UpperCAmelCase_ ) _lowercase : Dict = text_config["""model_type"""] if """model_type""" in text_config else """opt""" _lowercase : str = CONFIG_MAPPING[text_model_type](**UpperCAmelCase_ ) _lowercase : str = self.text_config.tie_word_embeddings _lowercase : Union[str, Any] = self.text_config.is_encoder_decoder _lowercase : List[str] = num_query_tokens _lowercase : List[str] = self.vision_config.hidden_size _lowercase : Dict = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES _lowercase : Union[str, Any] = 1.0 _lowercase : Dict = 0.02 @classmethod def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,**UpperCAmelCase_ ,): return cls( vision_config=vision_config.to_dict() ,qformer_config=qformer_config.to_dict() ,text_config=text_config.to_dict() ,**UpperCAmelCase_ ,) def lowerCamelCase__ ( self ): _lowercase : Union[str, Any] = copy.deepcopy(self.__dict__ ) _lowercase : int = self.vision_config.to_dict() _lowercase : Any = self.qformer_config.to_dict() _lowercase : Any = self.text_config.to_dict() _lowercase : Optional[int] = self.__class__.model_type return output
336
1
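The InstructBLIP config above is a composite: it owns sub-configs and can rebuild them from plain dicts. A cut-down sketch of that pattern with illustrative class names (these are not the real transformers classes):

class VisionConfig:
    # Minimal sub-config: only the field needed to show the round trip.
    def __init__(self, hidden_size=1408, **kwargs):
        self.hidden_size = hidden_size

    def to_dict(self):
        return {"hidden_size": self.hidden_size}

class CompositeConfig:
    # The parent config stores sub-configs and reconstructs them from dicts.
    def __init__(self, vision_config=None, num_query_tokens=32):
        self.vision_config = VisionConfig(**(vision_config or {}))
        self.num_query_tokens = num_query_tokens

    def to_dict(self):
        return {
            "vision_config": self.vision_config.to_dict(),
            "num_query_tokens": self.num_query_tokens,
        }

config = CompositeConfig(vision_config={"hidden_size": 512})
assert CompositeConfig(**config.to_dict()).vision_config.hidden_size == 512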
"""simple docstring""" from __future__ import annotations import unittest from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel @require_tf class UpperCamelCase : """simple docstring""" SCREAMING_SNAKE_CASE_ : int = BlenderbotConfig SCREAMING_SNAKE_CASE_ : Union[str, Any] = {} SCREAMING_SNAKE_CASE_ : Tuple = "gelu" def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_=13 ,UpperCAmelCase_=7 ,UpperCAmelCase_=True ,UpperCAmelCase_=False ,UpperCAmelCase_=99 ,UpperCAmelCase_=32 ,UpperCAmelCase_=2 ,UpperCAmelCase_=4 ,UpperCAmelCase_=37 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=20 ,UpperCAmelCase_=2 ,UpperCAmelCase_=1 ,UpperCAmelCase_=0 ,): _lowercase : Optional[int] = parent _lowercase : Union[str, Any] = batch_size _lowercase : List[str] = seq_length _lowercase : Dict = is_training _lowercase : Optional[int] = use_labels _lowercase : int = vocab_size _lowercase : Union[str, Any] = hidden_size _lowercase : Optional[Any] = num_hidden_layers _lowercase : Union[str, Any] = num_attention_heads _lowercase : Tuple = intermediate_size _lowercase : Union[str, Any] = hidden_dropout_prob _lowercase : Tuple = attention_probs_dropout_prob _lowercase : Optional[int] = max_position_embeddings _lowercase : List[str] = eos_token_id _lowercase : Dict = pad_token_id _lowercase : Any = bos_token_id def lowerCamelCase__ ( self ): _lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size ) _lowercase : Union[str, Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 ) _lowercase : Tuple = tf.concat([input_ids, eos_tensor] ,axis=1 ) _lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) _lowercase : Union[str, Any] = self.config_cls( vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,) _lowercase : int = prepare_blenderbot_inputs_dict(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ) return config, inputs_dict def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ): _lowercase : Any = TFBlenderbotModel(config=UpperCAmelCase_ ).get_decoder() _lowercase : Union[str, Any] = inputs_dict["""input_ids"""] _lowercase : Optional[Any] = input_ids[:1, :] _lowercase : Union[str, Any] = inputs_dict["""attention_mask"""][:1, :] _lowercase : List[Any] = inputs_dict["""head_mask"""] _lowercase : Dict = 1 # first forward pass _lowercase : List[str] = model(UpperCAmelCase_ ,attention_mask=UpperCAmelCase_ ,head_mask=UpperCAmelCase_ ,use_cache=UpperCAmelCase_ ) _lowercase , _lowercase : Optional[Any] = 
outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _lowercase : Any = ids_tensor((self.batch_size, 3) ,config.vocab_size ) _lowercase : str = tf.cast(ids_tensor((self.batch_size, 3) ,2 ) ,tf.inta ) # append to next input_ids and _lowercase : Optional[Any] = tf.concat([input_ids, next_tokens] ,axis=-1 ) _lowercase : Tuple = tf.concat([attention_mask, next_attn_mask] ,axis=-1 ) _lowercase : str = model(UpperCAmelCase_ ,attention_mask=UpperCAmelCase_ )[0] _lowercase : Dict = model(UpperCAmelCase_ ,attention_mask=UpperCAmelCase_ ,past_key_values=UpperCAmelCase_ )[0] self.parent.assertEqual(next_tokens.shape[1] ,output_from_past.shape[1] ) # select random slice _lowercase : int = int(ids_tensor((1,) ,output_from_past.shape[-1] ) ) _lowercase : List[str] = output_from_no_past[:, -3:, random_slice_idx] _lowercase : str = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(UpperCAmelCase_ ,UpperCAmelCase_ ,rtol=1E-3 ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , ): if attention_mask is None: _lowercase : Dict = tf.cast(tf.math.not_equal(__UpperCAmelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: _lowercase : Optional[int] = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: _lowercase : Optional[Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _lowercase : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _lowercase : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class UpperCamelCase ( snake_case , snake_case , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : str = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else () SCREAMING_SNAKE_CASE_ : List[Any] = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else () SCREAMING_SNAKE_CASE_ : Optional[Any] = ( { "conversational": TFBlenderbotForConditionalGeneration, "feature-extraction": TFBlenderbotModel, "summarization": TFBlenderbotForConditionalGeneration, "text2text-generation": TFBlenderbotForConditionalGeneration, "translation": TFBlenderbotForConditionalGeneration, } if is_tf_available() else {} ) SCREAMING_SNAKE_CASE_ : List[str] = True SCREAMING_SNAKE_CASE_ : Any = False SCREAMING_SNAKE_CASE_ : Optional[Any] = False def lowerCamelCase__ ( self ): _lowercase : List[str] = TFBlenderbotModelTester(self ) _lowercase : Dict = ConfigTester(self ,config_class=UpperCAmelCase_ ) def lowerCamelCase__ ( self ): self.config_tester.run_common_tests() def lowerCamelCase__ ( self ): _lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*UpperCAmelCase_ ) @require_tokenizers @require_tf class UpperCamelCase ( unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : 
Optional[int] = ["My friends are cool but they eat too many carbs."] SCREAMING_SNAKE_CASE_ : str = "facebook/blenderbot-400M-distill" @cached_property def lowerCamelCase__ ( self ): return BlenderbotTokenizer.from_pretrained(self.model_name ) @cached_property def lowerCamelCase__ ( self ): _lowercase : int = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def lowerCamelCase__ ( self ): _lowercase : Optional[Any] = self.tokenizer(self.src_text ,return_tensors="""tf""" ) _lowercase : Dict = self.model.generate( model_inputs.input_ids ,) _lowercase : Dict = self.tokenizer.batch_decode(generated_ids.numpy() ,skip_special_tokens=UpperCAmelCase_ )[0] assert ( generated_words == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?" )
336
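The helper at the end of the test file above derives attention masks from padding. The core operation is a single comparison and cast; a minimal sketch using standard TensorFlow ops:

import tensorflow as tf

def make_attention_mask(input_ids, pad_token_id):
    # 1 where the token is real, 0 where it is padding.
    return tf.cast(tf.math.not_equal(input_ids, pad_token_id), tf.int32)

ids = tf.constant([[5, 7, 0, 0], [3, 0, 0, 0]])
print(make_attention_mask(ids, pad_token_id=0).numpy())
# [[1 1 0 0]
#  [1 0 0 0]]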
"""simple docstring""" import cva import numpy as np class UpperCamelCase : """simple docstring""" def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ): if k in (0.04, 0.06): _lowercase : Optional[Any] = k _lowercase : Optional[Any] = window_size else: raise ValueError("""invalid k value""" ) def __str__( self ): return str(self.k ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ): _lowercase : List[str] = cva.imread(UpperCAmelCase_ ,0 ) _lowercase , _lowercase : Dict = img.shape _lowercase : list[list[int]] = [] _lowercase : int = img.copy() _lowercase : List[str] = cva.cvtColor(UpperCAmelCase_ ,cva.COLOR_GRAY2RGB ) _lowercase , _lowercase : Optional[Any] = np.gradient(UpperCAmelCase_ ) _lowercase : Optional[int] = dx**2 _lowercase : Optional[Any] = dy**2 _lowercase : Optional[Any] = dx * dy _lowercase : List[str] = 0.04 _lowercase : Optional[Any] = self.window_size // 2 for y in range(UpperCAmelCase_ ,h - offset ): for x in range(UpperCAmelCase_ ,w - offset ): _lowercase : Optional[Any] = ixx[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() _lowercase : Dict = iyy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() _lowercase : Union[str, Any] = ixy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() _lowercase : int = (wxx * wyy) - (wxy**2) _lowercase : Union[str, Any] = wxx + wyy _lowercase : Union[str, Any] = det - k * (trace**2) # Can change the value if r > 0.5: corner_list.append([x, y, r] ) color_img.itemset((y, x, 0) ,0 ) color_img.itemset((y, x, 1) ,0 ) color_img.itemset((y, x, 2) ,2_55 ) return color_img, corner_list if __name__ == "__main__": UpperCAmelCase: Optional[int] = HarrisCorner(0.04, 3) UpperCAmelCase , UpperCAmelCase: List[Any] = edge_detect.detect("""path_to_image""") cva.imwrite("""detect.png""", color_img)
336
1
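The Harris detector above evaluates the response R = det(M) - k * trace(M)^2 pixel by pixel inside an explicit window loop. A vectorized sketch of the same computation; the box filter here is a simple roll-and-add, which wraps at the edges, acceptable for a sketch but not for production use.

import numpy as np

def harris_response(img, k=0.04, window=3):
    # Gradient products give the structure tensor entries; each is summed over
    # a local window, then R = det(M) - k * trace(M)^2.
    dy, dx = np.gradient(img.astype(float))
    ixx, iyy, ixy = dx * dx, dy * dy, dx * dy

    def box_sum(a):
        offset = window // 2
        total = np.zeros_like(a)
        for u in range(-offset, offset + 1):
            for v in range(-offset, offset + 1):
                total += np.roll(np.roll(a, u, axis=0), v, axis=1)
        return total

    wxx, wyy, wxy = box_sum(ixx), box_sum(iyy), box_sum(ixy)
    return wxx * wyy - wxy**2 - k * (wxx + wyy) ** 2

img = np.zeros((10, 10))
img[3:7, 3:7] = 255.0  # a bright square; corner-like points respond positively
print(harris_response(img).max())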
"""simple docstring""" def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): return not any( neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(__UpperCAmelCase ) ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): # Base Case if index == len(__UpperCAmelCase ): return True # Recursive Step for i in range(__UpperCAmelCase ): if valid_coloring(graph[index] , __UpperCAmelCase , __UpperCAmelCase ): # Color current vertex _lowercase : Dict = i # Validate coloring if util_color(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , index + 1 ): return True # Backtrack _lowercase : Dict = -1 return False def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ): _lowercase : Union[str, Any] = [-1] * len(__UpperCAmelCase ) if util_color(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , 0 ): return colored_vertices return []
336
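The m-coloring code above splits into a validity check, a backtracking step, and an entry point. A compact restatement with readable names and a worked example; a triangle is 3-colorable but not 2-colorable:

def color_graph(graph, max_colors):
    # Backtracking m-coloring over an adjacency matrix; returns [] if impossible.
    colors = [-1] * len(graph)

    def ok(vertex, color):
        return all(not (adj and colors[i] == color) for i, adj in enumerate(graph[vertex]))

    def solve(vertex):
        if vertex == len(graph):
            return True
        for color in range(max_colors):
            if ok(vertex, color):
                colors[vertex] = color
                if solve(vertex + 1):
                    return True
                colors[vertex] = -1  # backtrack
        return False

    return colors if solve(0) else []

triangle = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
print(color_graph(triangle, 3))  # e.g. [0, 1, 2]
print(color_graph(triangle, 2))  # []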
"""simple docstring""" import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCamelCase ( snake_case , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = LEDTokenizer SCREAMING_SNAKE_CASE_ : List[str] = LEDTokenizerFast SCREAMING_SNAKE_CASE_ : List[str] = True def lowerCamelCase__ ( self ): super().setUp() _lowercase : Union[str, Any] = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] _lowercase : List[Any] = dict(zip(UpperCAmelCase_ ,range(len(UpperCAmelCase_ ) ) ) ) _lowercase : Optional[int] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] _lowercase : Dict = {"""unk_token""": """<unk>"""} _lowercase : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] ) _lowercase : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write(json.dumps(UpperCAmelCase_ ) + """\n""" ) with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write("""\n""".join(UpperCAmelCase_ ) ) def lowerCamelCase__ ( self ,**UpperCAmelCase_ ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,**UpperCAmelCase_ ): kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ): return "lower newer", "lower newer" @cached_property def lowerCamelCase__ ( self ): return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" ) @cached_property def lowerCamelCase__ ( self ): return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" ) @require_torch def lowerCamelCase__ ( self ): _lowercase : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] _lowercase : Any = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowercase : Tuple = tokenizer(UpperCAmelCase_ ,max_length=len(UpperCAmelCase_ ) ,padding=UpperCAmelCase_ ,return_tensors="""pt""" ) self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ ) self.assertEqual((2, 9) ,batch.input_ids.shape ) self.assertEqual((2, 9) ,batch.attention_mask.shape ) _lowercase : Optional[Any] = batch.input_ids.tolist()[0] self.assertListEqual(UpperCAmelCase_ ,UpperCAmelCase_ ) @require_torch def lowerCamelCase__ ( self ): _lowercase : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowercase : Dict = tokenizer(UpperCAmelCase_ ,padding=UpperCAmelCase_ ,return_tensors="""pt""" ) self.assertIn("""input_ids""" ,UpperCAmelCase_ ) self.assertIn("""attention_mask""" ,UpperCAmelCase_ ) self.assertNotIn("""labels""" ,UpperCAmelCase_ ) self.assertNotIn("""decoder_attention_mask""" ,UpperCAmelCase_ ) 
@require_torch def lowerCamelCase__ ( self ): _lowercase : Dict = [ """Summary of the text.""", """Another summary.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowercase : Tuple = tokenizer(text_target=UpperCAmelCase_ ,max_length=32 ,padding="""max_length""" ,return_tensors="""pt""" ) self.assertEqual(32 ,targets["""input_ids"""].shape[1] ) @require_torch def lowerCamelCase__ ( self ): for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowercase : List[Any] = tokenizer( ["""I am a small frog""" * 10_24, """I am a small frog"""] ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,return_tensors="""pt""" ) self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ ) self.assertEqual(batch.input_ids.shape ,(2, 51_22) ) @require_torch def lowerCamelCase__ ( self ): _lowercase : List[Any] = ["""A long paragraph for summarization."""] _lowercase : Dict = [ """Summary of the text.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowercase : Dict = tokenizer(UpperCAmelCase_ ,return_tensors="""pt""" ) _lowercase : List[str] = tokenizer(text_target=UpperCAmelCase_ ,return_tensors="""pt""" ) _lowercase : Union[str, Any] = inputs["""input_ids"""] _lowercase : List[str] = targets["""input_ids"""] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def lowerCamelCase__ ( self ): for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowercase : str = ["""Summary of the text.""", """Another summary."""] _lowercase : Optional[int] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] _lowercase : Any = tokenizer(UpperCAmelCase_ ,padding=UpperCAmelCase_ ) _lowercase : str = [[0] * len(UpperCAmelCase_ ) for x in encoded_output["""input_ids"""]] _lowercase : Optional[int] = tokenizer.pad(UpperCAmelCase_ ) self.assertSequenceEqual(outputs["""global_attention_mask"""] ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ): pass def lowerCamelCase__ ( self ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): _lowercase : int = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ ) _lowercase : Optional[int] = self.tokenizer_class.from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ ) _lowercase : Dict = """A, <mask> AllenNLP sentence.""" _lowercase : List[Any] = tokenizer_r.encode_plus(UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ ) _lowercase : Any = tokenizer_p.encode_plus(UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ ) self.assertEqual(sum(tokens_r["""token_type_ids"""] ) ,sum(tokens_p["""token_type_ids"""] ) ) self.assertEqual( sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) ,sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) ,) _lowercase : str = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) _lowercase : str = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) self.assertSequenceEqual(tokens_p["""input_ids"""] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) 
self.assertSequenceEqual( UpperCAmelCase_ ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( UpperCAmelCase_ ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
336
1
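One detail the tokenizer test above exercises is that pad must also pad model-specific keys such as global_attention_mask, filling with -1. The padding rule itself is simple; a sketch in isolation:

def pad_global_attention_mask(masks, pad_value=-1):
    # Pad every example to the longest mask in the batch, mirroring what
    # tokenizer.pad does for LED's extra global_attention_mask key.
    longest = max(len(m) for m in masks)
    return [m + [pad_value] * (longest - len(m)) for m in masks]

print(pad_global_attention_mask([[0, 0, 0], [0]]))  # [[0, 0, 0], [0, -1, -1]]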
"""simple docstring""" UpperCAmelCase: List[str] = """0.21.0""" from .accelerator import Accelerator from .big_modeling import ( cpu_offload, cpu_offload_with_hook, disk_offload, dispatch_model, init_empty_weights, init_on_device, load_checkpoint_and_dispatch, ) from .data_loader import skip_first_batches from .launchers import debug_launcher, notebook_launcher from .state import PartialState from .utils import ( DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, FullyShardedDataParallelPlugin, GradScalerKwargs, InitProcessGroupKwargs, find_executable_batch_size, infer_auto_device_map, is_rich_available, load_checkpoint_in_model, synchronize_rng_states, ) if is_rich_available(): from .utils import rich
336
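Among the utilities re-exported above, find_executable_batch_size deserves a usage sketch: it retries a function with a halved batch size whenever it sees an out-of-memory error. The OOM is simulated here by raising the message the helper looks for; the retry behavior is as documented for accelerate, not verified against every release.

from accelerate import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    # Pretend that anything above 32 exhausts GPU memory.
    if batch_size > 32:
        raise RuntimeError("CUDA out of memory.")
    return batch_size

print(train())  # 128 -> 64 -> 32: the first batch size that fits is returned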
"""simple docstring""" import argparse from collections import defaultdict def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): _lowercase : str = F"""{file}_{class_name}_{test_name}""" done_test[_id] += 1 with open(__UpperCAmelCase , """r""" ) as f: _lowercase : Any = f.readlines() _lowercase : Optional[int] = F"""class {class_name}(""" _lowercase : List[str] = F"""{4 * " "}def {test_name}(""" _lowercase : List[Any] = F"""{8 * " "}{correct_line.split()[0]}""" _lowercase : int = F"""{16 * " "}{correct_line.split()[0]}""" _lowercase : str = False _lowercase : Optional[Any] = False _lowercase : Union[str, Any] = False _lowercase : Any = False _lowercase : int = 0 _lowercase : Tuple = 0 _lowercase : Union[str, Any] = [] for line in lines: if line.startswith(__UpperCAmelCase ): _lowercase : List[str] = True elif in_class and line.startswith(__UpperCAmelCase ): _lowercase : str = True elif in_class and in_func and (line.startswith(__UpperCAmelCase ) or line.startswith(__UpperCAmelCase )): _lowercase : Union[str, Any] = len(line.split(correct_line.split()[0] )[0] ) count += 1 if count == done_test[_id]: _lowercase : Optional[int] = True if in_class and in_func and in_line: if ")" not in line: continue else: _lowercase : Optional[Any] = True if in_class and in_func and in_line and insert_line: new_lines.append(F"""{spaces * " "}{correct_line}""" ) _lowercase : Union[str, Any] = False else: new_lines.append(__UpperCAmelCase ) with open(__UpperCAmelCase , """w""" ) as f: for line in new_lines: f.write(__UpperCAmelCase ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase=None ): if fail is not None: with open(__UpperCAmelCase , """r""" ) as f: _lowercase : Dict = {l.strip() for l in f.readlines()} else: _lowercase : int = None with open(__UpperCAmelCase , """r""" ) as f: _lowercase : int = f.readlines() _lowercase : int = defaultdict(__UpperCAmelCase ) for line in correct_lines: _lowercase , _lowercase , _lowercase , _lowercase : int = line.split(""";""" ) if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures: overwrite_file(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) if __name__ == "__main__": UpperCAmelCase: List[Any] = argparse.ArgumentParser() parser.add_argument("""--correct_filename""", help="""filename of tests with expected result""") parser.add_argument("""--fail_filename""", help="""filename of test failures""", type=str, default=None) UpperCAmelCase: Any = parser.parse_args() main(args.correct_filename, args.fail_filename)
336
1
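The script above counts how often each (file, class, test) triple has appeared so that repeated corrections target successive matches in the same file. The heart of that is replacing only the n-th matching line; a small sketch of that logic in isolation:

def overwrite_nth_match(lines, predicate, replacement, n):
    # Replace only the n-th line satisfying the predicate, leaving earlier
    # and later matches untouched.
    seen = 0
    out = []
    for line in lines:
        if predicate(line):
            seen += 1
            if seen == n:
                out.append(replacement)
                continue
        out.append(line)
    return out

lines = ["x = 1", "assert x == 2", "assert x == 2"]
print(overwrite_nth_match(lines, lambda l: l.startswith("assert"), "assert x == 1", 2))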
"""simple docstring""" from __future__ import annotations from itertools import permutations from random import randint from timeit import repeat def __SCREAMING_SNAKE_CASE ( ): _lowercase : Dict = [randint(-1000 , 1000 ) for i in range(10 )] _lowercase : Tuple = randint(-5000 , 5000 ) return (arr, r) UpperCAmelCase: int = make_dataset() def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ): for triplet in permutations(__UpperCAmelCase , 3 ): if sum(__UpperCAmelCase ) == target: return tuple(sorted(__UpperCAmelCase ) ) return (0, 0, 0) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ): arr.sort() _lowercase : Optional[Any] = len(__UpperCAmelCase ) for i in range(n - 1 ): _lowercase , _lowercase : str = i + 1, n - 1 while left < right: if arr[i] + arr[left] + arr[right] == target: return (arr[i], arr[left], arr[right]) elif arr[i] + arr[left] + arr[right] < target: left += 1 elif arr[i] + arr[left] + arr[right] > target: right -= 1 return (0, 0, 0) def __SCREAMING_SNAKE_CASE ( ): _lowercase : Tuple = """ from __main__ import dataset, triplet_sum1, triplet_sum2 """ _lowercase : Union[str, Any] = """ triplet_sum1(*dataset) """ _lowercase : Union[str, Any] = """ triplet_sum2(*dataset) """ _lowercase : Dict = repeat(setup=__UpperCAmelCase , stmt=__UpperCAmelCase , repeat=5 , number=10000 ) _lowercase : Any = repeat(setup=__UpperCAmelCase , stmt=__UpperCAmelCase , repeat=5 , number=10000 ) return (min(__UpperCAmelCase ), min(__UpperCAmelCase )) if __name__ == "__main__": from doctest import testmod testmod() UpperCAmelCase: Any = solution_times() print(F'The time for naive implementation is {times[0]}.') print(F'The time for optimized implementation is {times[1]}.')
336
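The two triplet-sum strategies above are the O(n^3) brute force over permutations and the O(n^2) sort-plus-two-pointer scan. A readable restatement with a cross-check that both agree on a known input:

from itertools import permutations

def triplet_sum_naive(arr, target):
    # Try every ordered triple; sort the first hit for a canonical answer.
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)

def triplet_sum_two_pointer(arr, target):
    # Fix the smallest element, then move two pointers inward over the rest.
    arr = sorted(arr)
    for i in range(len(arr) - 2):
        left, right = i + 1, len(arr) - 1
        while left < right:
            s = arr[i] + arr[left] + arr[right]
            if s == target:
                return (arr[i], arr[left], arr[right])
            left, right = (left + 1, right) if s < target else (left, right - 1)
    return (0, 0, 0)

assert triplet_sum_two_pointer([1, 4, 45, 6, 10, 8], 22) == (4, 8, 10)
assert triplet_sum_naive([1, 4, 45, 6, 10, 8], 22) == (4, 8, 10)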
"""simple docstring""" UpperCAmelCase: List[str] = """0.21.0""" from .accelerator import Accelerator from .big_modeling import ( cpu_offload, cpu_offload_with_hook, disk_offload, dispatch_model, init_empty_weights, init_on_device, load_checkpoint_and_dispatch, ) from .data_loader import skip_first_batches from .launchers import debug_launcher, notebook_launcher from .state import PartialState from .utils import ( DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, FullyShardedDataParallelPlugin, GradScalerKwargs, InitProcessGroupKwargs, find_executable_batch_size, infer_auto_device_map, is_rich_available, load_checkpoint_in_model, synchronize_rng_states, ) if is_rich_available(): from .utils import rich
336
1
"""simple docstring""" from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging UpperCAmelCase: List[Any] = logging.get_logger(__name__) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): if isinstance(__UpperCAmelCase , np.ndarray ): return list(tensor.shape ) _lowercase : Union[str, Any] = tf.shape(__UpperCAmelCase ) if tensor.shape == tf.TensorShape(__UpperCAmelCase ): return dynamic _lowercase : int = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(__UpperCAmelCase )] def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None ): return tf.nn.softmax(logits=logits + 1E-9 , axis=__UpperCAmelCase , name=__UpperCAmelCase ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1E-5 , __UpperCAmelCase=-1 ): # This is a very simplified functional layernorm, designed to duplicate # the functionality of PyTorch nn.functional.layer_norm when this is needed to port # models in Transformers. if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise NotImplementedError("""Only 1D weight and bias tensors are supported for now, with only a single axis.""" ) # Get mean and variance on the axis to be normalized _lowercase , _lowercase : str = tf.nn.moments(__UpperCAmelCase , axes=[axis] , keepdims=__UpperCAmelCase ) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis _lowercase : int = [1] * inputs.shape.rank _lowercase : Union[str, Any] = shape_list(__UpperCAmelCase )[axis] _lowercase : Any = tf.reshape(__UpperCAmelCase , __UpperCAmelCase ) _lowercase : Any = tf.reshape(__UpperCAmelCase , __UpperCAmelCase ) # Compute layer normalization using the batch_normalization # function. _lowercase : Union[str, Any] = tf.nn.batch_normalization( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , offset=__UpperCAmelCase , scale=__UpperCAmelCase , variance_epsilon=__UpperCAmelCase , ) return outputs def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase=0 , __UpperCAmelCase=-1 ): # Replicates the behavior of torch.flatten in TF # If end_dim or start_dim is negative, count them from the end if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input _lowercase : Optional[int] = tf.shape(__UpperCAmelCase ) _lowercase : Union[str, Any] = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] ) _lowercase : List[str] = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 ) return tf.reshape(__UpperCAmelCase , __UpperCAmelCase ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): if not isinstance(__UpperCAmelCase , tf.Tensor ): _lowercase : Tuple = tf.convert_to_tensor(__UpperCAmelCase ) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: _lowercase : Dict = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: _lowercase : Union[str, Any] = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. 
https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) _lowercase : Union[str, Any] = ( tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = "input_ids" ): tf.debugging.assert_less( __UpperCAmelCase , tf.cast(__UpperCAmelCase , dtype=tensor.dtype ) , message=( F"""The maximum value of {tensor_name} ({tf.math.reduce_max(__UpperCAmelCase )}) must be smaller than the embedding """ F"""layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time.""" ) , ) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): _lowercase : Optional[Any] = 64512 # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. _lowercase : str = [x for x in data if len(__UpperCAmelCase ) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( """The following attributes cannot be saved to HDF5 file because """ F"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """ F"""bytes: {bad_attributes}""" ) _lowercase : str = np.asarray(__UpperCAmelCase ) _lowercase : Any = 1 _lowercase : Union[str, Any] = np.array_split(__UpperCAmelCase , __UpperCAmelCase ) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ): num_chunks += 1 _lowercase : Any = np.array_split(__UpperCAmelCase , __UpperCAmelCase ) if num_chunks > 1: for chunk_id, chunk_data in enumerate(__UpperCAmelCase ): _lowercase : Any = chunk_data else: _lowercase : List[str] = data def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ): if name in group.attrs: _lowercase : str = [n.decode("""utf8""" ) if hasattr(__UpperCAmelCase , """decode""" ) else n for n in group.attrs[name]] else: _lowercase : Union[str, Any] = [] _lowercase : Any = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode("""utf8""" ) if hasattr(__UpperCAmelCase , """decode""" ) else n for n in group.attrs["""%s%d""" % (name, chunk_id)]] ) chunk_id += 1 return data def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): def _expand_single_ad_tensor(__UpperCAmelCase ): if isinstance(__UpperCAmelCase , tf.Tensor ) and t.shape.rank == 1: return tf.expand_dims(__UpperCAmelCase , axis=-1 ) return t return tf.nest.map_structure(_expand_single_ad_tensor , __UpperCAmelCase )
336
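shape_list at the top of the TensorFlow utilities above is the standard trick for mixing static and dynamic shapes. A sketch of why it matters: inside a tf.function traced with an unknown batch dimension, the static shape entry is None while tf.shape still yields a usable tensor.

import tensorflow as tf

def shape_list(tensor):
    # Static dims where known at trace time, dynamic tf.shape() entries otherwise.
    dynamic = tf.shape(tensor)
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static)]

print(shape_list(tf.ones((2, 3))))  # [2, 3], fully static in eager mode

@tf.function(input_signature=[tf.TensorSpec([None, 3])])
def traced(x):
    dims = shape_list(x)
    tf.print(dims[1])  # 3 is static; dims[0] is a dynamic scalar tensor
    return x

traced(tf.ones((2, 3)))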
"""simple docstring""" UpperCAmelCase: str = """ # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git """ UpperCAmelCase: Any = [{"""type""": """code""", """content""": INSTALL_CONTENT}] UpperCAmelCase: int = { """{processor_class}""": """FakeProcessorClass""", """{model_class}""": """FakeModelClass""", """{object_class}""": """FakeObjectClass""", }
336
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase: str = { """configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase: Tuple = [ """PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""", """PegasusXForConditionalGeneration""", """PegasusXModel""", """PegasusXPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pegasus_x import ( PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST, PegasusXForConditionalGeneration, PegasusXModel, PegasusXPreTrainedModel, ) else: import sys UpperCAmelCase: str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
336
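The module above defers all heavy imports through _LazyModule. The same effect can be hand-rolled with PEP 562's module-level __getattr__; the sketch below belongs in a package __init__.py, and the module and class names are purely illustrative.

# pkg/__init__.py
import importlib

_import_structure = {"configuration": ["MyConfig"], "modeling": ["MyModel"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    # The submodule is imported only on first attribute access.
    if name in _attr_to_module:
        module = importlib.import_module("." + _attr_to_module[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")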
"""simple docstring""" import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL UpperCAmelCase: List[Any] = logging.get_logger(__name__) def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): def constraint_to_multiple_of(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=0 , __UpperCAmelCase=None ): _lowercase : Union[str, Any] = round(val / multiple ) * multiple if max_val is not None and x > max_val: _lowercase : str = math.floor(val / multiple ) * multiple if x < min_val: _lowercase : Dict = math.ceil(val / multiple ) * multiple return x _lowercase : List[str] = (output_size, output_size) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else output_size _lowercase , _lowercase : List[Any] = get_image_size(__UpperCAmelCase ) _lowercase , _lowercase : Union[str, Any] = output_size # determine new height and width _lowercase : str = output_height / input_height _lowercase : List[Any] = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width _lowercase : str = scale_width else: # fit height _lowercase : int = scale_height _lowercase : List[Any] = constraint_to_multiple_of(scale_height * input_height , multiple=__UpperCAmelCase ) _lowercase : Optional[Any] = constraint_to_multiple_of(scale_width * input_width , multiple=__UpperCAmelCase ) return (new_height, new_width) class UpperCamelCase ( snake_case ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = ["pixel_values"] def __init__( self ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,UpperCAmelCase_ = PILImageResampling.BILINEAR ,UpperCAmelCase_ = False ,UpperCAmelCase_ = 1 ,UpperCAmelCase_ = True ,UpperCAmelCase_ = 1 / 2_55 ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,): super().__init__(**UpperCAmelCase_ ) _lowercase : List[Any] = size if size is not None else {"""height""": 3_84, """width""": 3_84} _lowercase : str = get_size_dict(UpperCAmelCase_ ) _lowercase : Tuple = do_resize _lowercase : Any = size _lowercase : List[Any] = keep_aspect_ratio _lowercase : Any = ensure_multiple_of _lowercase : str = resample _lowercase : Optional[Any] = do_rescale _lowercase : List[Any] = rescale_factor _lowercase : Union[str, Any] = do_normalize _lowercase : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _lowercase : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = False ,UpperCAmelCase_ = 1 ,UpperCAmelCase_ = PILImageResampling.BICUBIC ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,): _lowercase : Optional[Any] = get_size_dict(UpperCAmelCase_ ) if "height" not in size or "width" not in size: raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. 
class DPTImageProcessor(BaseImageProcessor):
    """simple docstring"""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        # `resize` here is the functional transform imported above, not this method.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        keep_aspect_ratio: Optional[bool] = None,
        ensure_multiple_of: Optional[int] = None,
        resample: Optional[PILImageResampling] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List[Tuple]] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
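A hedged usage sketch for the processor above, assuming the class is importable in the current scope. The dummy image and the size override are made-up values, and `return_tensors="np"` relies on the standard `BatchFeature` conversion behavior; treat this as an illustration, not canonical usage.

import numpy as np

# Dummy 480x640 RGB image in HWC uint8 layout.
image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)

processor = DPTImageProcessor(size={"height": 384, "width": 384})
batch = processor.preprocess(image, return_tensors="np")

# Resized, rescaled to [0, 1], normalized, and stacked channel-first.
print(batch["pixel_values"].shape)  # (1, 3, 384, 384)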
336
1